u-boot/drivers/block/blkcache.c
Simon Glass 5ea894ac42 dm: test: Clear the block cache after running a test
Some tests access data in block devices and so cause the cache to fill
up. This results in memory being allocated.

Some tests check the malloc usage at the beginning and then again at the
end, to ensure there is no memory leak caused by the test. The block cache
makes this difficult, since any test may cause entries to be allocated, or
even freed if the cache becomes full.

It is simpler to clear the block cache after each test. This ensures that
it will not introduce noise in tests which check malloc usage.

Add the logic to clear the cache, using the existing blkcache_invalidate()
function. Drop the duplicate code at the same time.

Signed-off-by: Simon Glass <sjg@chromium.org>
2022-11-07 16:24:30 -07:00
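
As a rough sketch of how a test runner might use this (hypothetical code,
not part of this commit; the function name test_post_run() is illustrative
only), a per-test teardown hook can simply empty the cache:

#include <blk.h>

/* hypothetical teardown hook, run after each test completes */
static void test_post_run(void)
{
	/* drop all cached blocks so malloc-usage checks see a clean slate */
	blkcache_free();
}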


// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) Nelson Integration, LLC 2016
 * Author: Eric Nelson<eric@nelint.com>
 *
 */
#include <common.h>
#include <blk.h>
#include <log.h>
#include <malloc.h>
#include <part.h>
#include <asm/global_data.h>
#include <linux/ctype.h>
#include <linux/list.h>

#ifdef CONFIG_NEEDS_MANUAL_RELOC
DECLARE_GLOBAL_DATA_PTR;
#endif
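
/*
 * One cached range of blocks: blkcnt blocks of blksz bytes, starting at
 * LBA start on device (iftype, devnum). Entries are kept on block_cache
 * in most-recently-used order, newest first.
 */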
struct block_cache_node {
	struct list_head lh;
	int iftype;
	int devnum;
	lbaint_t start;
	lbaint_t blkcnt;
	unsigned long blksz;
	char *cache;
};

static LIST_HEAD(block_cache);

static struct block_cache_stats _stats = {
	.max_blocks_per_entry = 8,
	.max_entries = 32
};
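
/*
 * After relocation the list head's next/prev still hold pre-relocation
 * addresses, so shift them by gd->reloc_off to make the list usable again.
 */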
#ifdef CONFIG_NEEDS_MANUAL_RELOC
int blkcache_init(void)
{
	struct list_head *head = &block_cache;

	head->next = (struct list_head *)((uintptr_t)head->next + gd->reloc_off);
	head->prev = (struct list_head *)((uintptr_t)head->prev + gd->reloc_off);

	return 0;
}
#endif
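
/*
 * Find a cache entry that fully covers [start, start + blkcnt) for the
 * given device and block size, and move it to the front of the list to
 * maintain MRU ordering. Returns NULL if no entry covers the range.
 */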
static struct block_cache_node *cache_find(int iftype, int devnum,
					   lbaint_t start, lbaint_t blkcnt,
					   unsigned long blksz)
{
	struct block_cache_node *node;

	list_for_each_entry(node, &block_cache, lh)
		if ((node->iftype == iftype) &&
		    (node->devnum == devnum) &&
		    (node->blksz == blksz) &&
		    (node->start <= start) &&
		    (node->start + node->blkcnt >= start + blkcnt)) {
			if (block_cache.next != &node->lh) {
				/* maintain MRU ordering */
				list_del(&node->lh);
				list_add(&node->lh, &block_cache);
			}
			return node;
		}

	return NULL;
}
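
/*
 * Copy cached data into buffer on a hit. Returns 1 and bumps the hit
 * counter on success, or 0 (counting a miss) if the range is not cached.
 */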
int blkcache_read(int iftype, int devnum,
		  lbaint_t start, lbaint_t blkcnt,
		  unsigned long blksz, void *buffer)
{
	struct block_cache_node *node = cache_find(iftype, devnum, start,
						   blkcnt, blksz);

	if (node) {
		const char *src = node->cache + (start - node->start) * blksz;

		memcpy(buffer, src, blksz * blkcnt);
		debug("hit: start " LBAF ", count " LBAFU "\n",
		      start, blkcnt);
		++_stats.hits;
		return 1;
	}

	debug("miss: start " LBAF ", count " LBAFU "\n",
	      start, blkcnt);
	++_stats.misses;
	return 0;
}
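
/*
 * Insert a freshly read range into the cache. Ranges larger than
 * max_blocks_per_entry are not cached. When the cache is full, the
 * least-recently-used entry (the list tail) is evicted, and its data
 * buffer is reused if it is large enough for the new range.
 */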
void blkcache_fill(int iftype, int devnum,
		   lbaint_t start, lbaint_t blkcnt,
		   unsigned long blksz, void const *buffer)
{
	lbaint_t bytes;
	struct block_cache_node *node;

	/* don't cache big stuff */
	if (blkcnt > _stats.max_blocks_per_entry)
		return;

	if (_stats.max_entries == 0)
		return;

	bytes = blksz * blkcnt;
	if (_stats.max_entries <= _stats.entries) {
		/* pop LRU */
		node = (struct block_cache_node *)block_cache.prev;
		list_del(&node->lh);
		_stats.entries--;
		debug("drop: start " LBAF ", count " LBAFU "\n",
		      node->start, node->blkcnt);
		if (node->blkcnt * node->blksz < bytes) {
			free(node->cache);
			node->cache = NULL;
		}
	} else {
		node = malloc(sizeof(*node));
		if (!node)
			return;
		node->cache = NULL;
	}

	if (!node->cache) {
		node->cache = malloc(bytes);
		if (!node->cache) {
			free(node);
			return;
		}
	}

	debug("fill: start " LBAF ", count " LBAFU "\n",
	      start, blkcnt);

	node->iftype = iftype;
	node->devnum = devnum;
	node->start = start;
	node->blkcnt = blkcnt;
	node->blksz = blksz;
	memcpy(node->cache, buffer, bytes);
	list_add(&node->lh, &block_cache);
	_stats.entries++;
}
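
/*
 * Drop all entries belonging to device (iftype, devnum); an iftype of -1
 * drops every entry regardless of device.
 */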
void blkcache_invalidate(int iftype, int devnum)
{
	struct list_head *entry, *n;
	struct block_cache_node *node;

	list_for_each_safe(entry, n, &block_cache) {
		node = (struct block_cache_node *)entry;
		if (iftype == -1 ||
		    (node->iftype == iftype && node->devnum == devnum)) {
			list_del(entry);
			free(node->cache);
			free(node);
			--_stats.entries;
		}
	}
}
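
/*
 * Set new cache limits, invalidating the whole cache first if either
 * limit changed, and reset the hit/miss counters.
 */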
void blkcache_configure(unsigned blocks, unsigned entries)
{
	/* invalidate cache if there is a change */
	if ((blocks != _stats.max_blocks_per_entry) ||
	    (entries != _stats.max_entries))
		blkcache_invalidate(-1, 0);

	_stats.max_blocks_per_entry = blocks;
	_stats.max_entries = entries;

	_stats.hits = 0;
	_stats.misses = 0;
}
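
/* Copy out the statistics and reset the hit/miss counters. */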
void blkcache_stats(struct block_cache_stats *stats)
{
	memcpy(stats, &_stats, sizeof(*stats));
	_stats.hits = 0;
	_stats.misses = 0;
}
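
/*
 * Empty the cache entirely; intended to be called after each test so
 * cached blocks do not show up as leaks in malloc accounting.
 */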
void blkcache_free(void)
{
	blkcache_invalidate(-1, 0);
}