regmap: Updates for v6.13

The main thing for regmap this time around is some improvements of the
 lockdep annotations which stop some false positives.  We also have one
 new helper for setting a bitmask to the same value, and several test
 improvements.
 -----BEGIN PGP SIGNATURE-----
 
 iQEzBAABCgAdFiEEreZoqmdXGLWf4p/qJNaLcl1Uh9AFAmc7OlQACgkQJNaLcl1U
 h9B9nAf5AQIZ8AddQuudV4SoZbS2TWoGOOgjEXQnY245C4kNMnwJ1rIbHu7WJ/OE
 oVt7ePo7HW0CzpwIrNvdlV+9J3b+XR6xA1/rAJqI+TTPM3FMn33XGZ+0r+ZDVkBT
 83/ZwLDPbRIVhwUgyHl0dIb5/pJddYVmJEDFHmRPY9Z8QQ4WPQn4SZvTfvF4y16C
 CJAV58A7Ei0MmnrJrGV3lF00qpkWMxdlpJu8TYgC1hM/hv9LAvVMEijuCpNR9NR5
 udP+jPe2kA+IIlXEfvoxnJ/x9BgSf6CPLYV2nugFZsdGAfhI8EB3v/SC7gqlGVR7
 tu4TR55KIh+lthRyD2uFRrI5GlzFvw==
 =epDO
 -----END PGP SIGNATURE-----

Merge tag 'regmap-v6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap

Pull regmap updates from Mark Brown:
 "The main thing for regmap this time around is some improvements of the
  lockdep annotations which stop some false positives. We also have one
  new helper for setting a bitmask to the same value, and several test
  improvements"

* tag 'regmap-v6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap:
  regmap: provide regmap_assign_bits()
  regmap: irq: Set lockdep class for hierarchical IRQ domains
  regmap: maple: Provide lockdep (sub)class for maple tree's internal lock
  regmap: kunit: Fix repeated test param
  regcache: Improve documentation of available cache types
  regmap: Specifically test writing 0 as a value to sparse caches
  regmap-irq: Consistently use memset32() in regmap_irq_thread()
Merged by Linus Torvalds, 2024-11-20 12:09:47 -08:00
commit 37c7d3538a
6 changed files with 78 additions and 6 deletions


@@ -59,6 +59,7 @@ struct regmap {
 			unsigned long raw_spinlock_flags;
 		};
 	};
+	struct lock_class_key *lock_key;
 	regmap_lock lock;
 	regmap_unlock unlock;
 	void *lock_arg; /* This is passed to lock/unlock functions */


@@ -355,6 +355,9 @@ static int regcache_maple_init(struct regmap *map)
 	mt_init(mt);
 
+	if (!mt_external_lock(mt) && map->lock_key)
+		lockdep_set_class_and_subclass(&mt->ma_lock, map->lock_key, 1);
+
 	if (!map->num_reg_defaults)
 		return 0;


@@ -364,14 +364,11 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
 		memset32(data->status_buf, GENMASK(31, 0), chip->num_regs);
 	} else if (chip->num_main_regs) {
 		unsigned int max_main_bits;
-		unsigned long size;
-
-		size = chip->num_regs * sizeof(unsigned int);
 
 		max_main_bits = (chip->num_main_status_bits) ?
 				chip->num_main_status_bits : chip->num_regs;
 		/* Clear the status buf as we don't read all status regs */
-		memset(data->status_buf, 0, size);
+		memset32(data->status_buf, 0, chip->num_regs);
 
 		/* We could support bulk read for main status registers
 		 * but I don't expect to see devices with really many main
@@ -514,12 +511,16 @@ exit:
 	return IRQ_NONE;
 }
 
+static struct lock_class_key regmap_irq_lock_class;
+static struct lock_class_key regmap_irq_request_class;
+
 static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
 			  irq_hw_number_t hw)
 {
 	struct regmap_irq_chip_data *data = h->host_data;
 
 	irq_set_chip_data(virq, data);
+	irq_set_lockdep_class(virq, &regmap_irq_lock_class, &regmap_irq_request_class);
 	irq_set_chip(virq, &data->irq_chip);
 	irq_set_nested_thread(virq, 1);
 	irq_set_parent(virq, data->irq);


@@ -126,7 +126,7 @@ static const struct regmap_test_param real_cache_types_list[] = {
 	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
 	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
 	{ .cache = REGCACHE_MAPLE, .from_reg = 0 },
-	{ .cache = REGCACHE_RBTREE, .from_reg = 0, .fast_io = true },
+	{ .cache = REGCACHE_MAPLE, .from_reg = 0, .fast_io = true },
 	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
 	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
 	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
@@ -1499,6 +1499,48 @@ static void cache_present(struct kunit *test)
 		KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, param->from_reg + i));
 }
 
+static void cache_write_zero(struct kunit *test)
+{
+	const struct regmap_test_param *param = test->param_value;
+	struct regmap *map;
+	struct regmap_config config;
+	struct regmap_ram_data *data;
+	unsigned int val;
+	int i;
+
+	config = test_regmap_config;
+
+	map = gen_regmap(test, &config, &data);
+	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+	if (IS_ERR(map))
+		return;
+
+	for (i = 0; i < BLOCK_TEST_SIZE; i++)
+		data->read[param->from_reg + i] = false;
+
+	/* No defaults so no registers cached. */
+	for (i = 0; i < BLOCK_TEST_SIZE; i++)
+		KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, param->from_reg + i));
+
+	/* We didn't trigger any reads */
+	for (i = 0; i < BLOCK_TEST_SIZE; i++)
+		KUNIT_ASSERT_FALSE(test, data->read[param->from_reg + i]);
+
+	/* Write a zero value */
+	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, 0));
+
+	/* Read that zero value back */
+	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
+	KUNIT_EXPECT_EQ(test, 0, val);
+
+	/* From the cache? */
+	KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, 1));
+
+	/* Try to throw it away */
+	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 1, 1));
+	KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, 1));
+}
+
 /* Check that caching the window register works with sync */
 static void cache_range_window_reg(struct kunit *test)
 {
@@ -2012,6 +2054,7 @@ static struct kunit_case regmap_test_cases[] = {
 	KUNIT_CASE_PARAM(cache_drop_all_and_sync_no_defaults, sparse_cache_types_gen_params),
 	KUNIT_CASE_PARAM(cache_drop_all_and_sync_has_defaults, sparse_cache_types_gen_params),
 	KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params),
+	KUNIT_CASE_PARAM(cache_write_zero, sparse_cache_types_gen_params),
 	KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_only_gen_params),
 	KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),


@@ -745,6 +745,7 @@ struct regmap *__regmap_init(struct device *dev,
 						   lock_key, lock_name);
 		}
 		map->lock_arg = map;
+		map->lock_key = lock_key;
 	}
 
 	/*


@@ -54,7 +54,14 @@ struct sdw_slave;
 #define REGMAP_UPSHIFT(s)	(-(s))
 #define REGMAP_DOWNSHIFT(s)	(s)
 
-/* An enum of all the supported cache types */
+/*
+ * The supported cache types, the default is no cache.  Any new caches
+ * should usually use the maple tree cache unless they specifically
+ * require that there are never any allocations at runtime and can't
+ * provide defaults in which case they should use the flat cache.  The
+ * rbtree cache *may* have some performance advantage for very low end
+ * systems that make heavy use of cache syncs but is mainly legacy.
+ */
 enum regcache_type {
 	REGCACHE_NONE,
 	REGCACHE_RBTREE,
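
For reference, a minimal sketch of how a driver might pick the recommended maple tree cache through its regmap_config, as the new comment above suggests. The register layout, device and function names here are invented for illustration only and are not part of this series:

#include <linux/i2c.h>
#include <linux/regmap.h>

/* Hypothetical register map: 8-bit registers, 0x00..0x7f */
static const struct regmap_config example_regmap_config = {
	.reg_bits	= 8,
	.val_bits	= 8,
	.max_register	= 0x7f,
	/* Maple tree is the suggested default for new drivers */
	.cache_type	= REGCACHE_MAPLE,
};

static int example_i2c_probe(struct i2c_client *i2c)
{
	struct regmap *map;

	map = devm_regmap_init_i2c(i2c, &example_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* ... rest of probe ... */
	return 0;
}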
@@ -1328,6 +1335,15 @@ static inline int regmap_clear_bits(struct regmap *map,
 	return regmap_update_bits_base(map, reg, bits, 0, NULL, false, false);
 }
 
+static inline int regmap_assign_bits(struct regmap *map, unsigned int reg,
+				     unsigned int bits, bool value)
+{
+	if (value)
+		return regmap_set_bits(map, reg, bits);
+	else
+		return regmap_clear_bits(map, reg, bits);
+}
+
 int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits);
 
 /**
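
A minimal usage sketch for the new regmap_assign_bits() helper defined above: it sets every bit in the mask when the boolean is true and clears them all when it is false, replacing the open-coded if/else on regmap_set_bits()/regmap_clear_bits(). The register and mask names below are hypothetical, for illustration only:

#include <linux/bits.h>
#include <linux/regmap.h>

/* Hypothetical register/bit definitions */
#define EXAMPLE_IRQ_EN		0x04
#define EXAMPLE_IRQ_EN_ALL	GENMASK(3, 0)

static int example_set_irq_enable(struct regmap *map, bool enable)
{
	/*
	 * Previously a driver had to open code:
	 *	if (enable)
	 *		return regmap_set_bits(map, reg, mask);
	 *	else
	 *		return regmap_clear_bits(map, reg, mask);
	 */
	return regmap_assign_bits(map, EXAMPLE_IRQ_EN, EXAMPLE_IRQ_EN_ALL, enable);
}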
@@ -1796,6 +1812,13 @@ static inline int regmap_clear_bits(struct regmap *map,
 	return -EINVAL;
 }
 
+static inline int regmap_assign_bits(struct regmap *map, unsigned int reg,
+				     unsigned int bits, bool value)
+{
+	WARN_ONCE(1, "regmap API is disabled");
+	return -EINVAL;
+}
+
 static inline int regmap_test_bits(struct regmap *map,
 				   unsigned int reg, unsigned int bits)
 {