forked from Minki/linux
staging/android: Initial partial kernel-doc for ashmem.c
I am beginning to understand the core concepts at play here. I am nowhere near finished with this class - However, it is better if I commit what I have documented so far tonight - That way, if I mess up tomorrow morning, I can just roll back to here. Sorry if this clutters things up. In the end, once *everything* is documented, it will make the Android staging driver easier to understand as a programmer - Hopefully for both new developers and current ones. Signed-off-by: Cruz Julian Bishop <cruzjbishop@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
fb51b500a9
commit
4d2c9d5ddc
@ -37,41 +37,59 @@
|
|||||||
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
|
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
|
||||||
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)
|
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)
|
||||||
|
|
||||||
/*
|
/**
|
||||||
* ashmem_area - anonymous shared memory area
|
* struct ashmem_area - The anonymous shared memory area
|
||||||
* Lifecycle: From our parent file's open() until its release()
|
* @name: The optional name in /proc/pid/maps
|
||||||
* Locking: Protected by `ashmem_mutex'
|
* @unpinned_list: The list of all ashmem areas
|
||||||
* Big Note: Mappings do NOT pin this structure; it dies on close()
|
* @file: The shmem-based backing file
|
||||||
|
* @size: The size of the mapping, in bytes
|
||||||
|
* @prot_masks: The allowed protection bits, as vm_flags
|
||||||
|
*
|
||||||
|
* The lifecycle of this structure is from our parent file's open() until
|
||||||
|
* its release(). It is also protected by 'ashmem_mutex'
|
||||||
|
*
|
||||||
|
* Warning: Mappings do NOT pin this structure; It dies on close()
|
||||||
*/
|
*/
|
||||||
struct ashmem_area {
|
struct ashmem_area {
|
||||||
char name[ASHMEM_FULL_NAME_LEN]; /* optional name in /proc/pid/maps */
|
char name[ASHMEM_FULL_NAME_LEN];
|
||||||
struct list_head unpinned_list; /* list of all ashmem areas */
|
struct list_head unpinned_list;
|
||||||
struct file *file; /* the shmem-based backing file */
|
struct file *file;
|
||||||
size_t size; /* size of the mapping, in bytes */
|
size_t size;
|
||||||
unsigned long prot_mask; /* allowed prot bits, as vm_flags */
|
unsigned long prot_mask;
|
||||||
};
|
};
|
||||||
|
|
||||||
/*
|
/**
|
||||||
* ashmem_range - represents an interval of unpinned (evictable) pages
|
* struct ashmem_range - A range of unpinned/evictable pages
|
||||||
* Lifecycle: From unpin to pin
|
* @lru: The entry in the LRU list
|
||||||
* Locking: Protected by `ashmem_mutex'
|
* @unpinned: The entry in its area's unpinned list
|
||||||
|
* @asma: The associated anonymous shared memory area.
|
||||||
|
* @pgstart: The starting page (inclusive)
|
||||||
|
* @pgend: The ending page (inclusive)
|
||||||
|
* @purged: The purge status (ASHMEM_NOT or ASHMEM_WAS_PURGED)
|
||||||
|
*
|
||||||
|
* The lifecycle of this structure is from unpin to pin.
|
||||||
|
* It is protected by 'ashmem_mutex'
|
||||||
*/
|
*/
|
||||||
struct ashmem_range {
|
struct ashmem_range {
|
||||||
struct list_head lru; /* entry in LRU list */
|
struct list_head lru;
|
||||||
struct list_head unpinned; /* entry in its area's unpinned list */
|
struct list_head unpinned;
|
||||||
struct ashmem_area *asma; /* associated area */
|
struct ashmem_area *asma;
|
||||||
size_t pgstart; /* starting page, inclusive */
|
size_t pgstart;
|
||||||
size_t pgend; /* ending page, inclusive */
|
size_t pgend;
|
||||||
unsigned int purged; /* ASHMEM_NOT or ASHMEM_WAS_PURGED */
|
unsigned int purged;
|
||||||
};
|
};
|
||||||
|
|
||||||
/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);
||||||
|
|
||||||
/*
 * lru_count - The count of pages on our LRU list.
 *
 * This is protected by ashmem_mutex.
 */
static unsigned long lru_count;
|
||||||
|
|
||||||
/*
|
/**
|
||||||
* ashmem_mutex - protects the list of and each individual ashmem_area
|
* ashmem_mutex - protects the list of and each individual ashmem_area
|
||||||
*
|
*
|
||||||
* Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
|
* Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
|
||||||
@ -105,28 +123,43 @@ static struct kmem_cache *ashmem_range_cachep __read_mostly;
|
|||||||
|
|
||||||
#define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE)
|
#define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE)
|
||||||
|
|
||||||
|
/**
|
||||||
|
* lru_add() - Adds a range of memory to the LRU list
|
||||||
|
* @range: The memory range being added.
|
||||||
|
*
|
||||||
|
* The range is first added to the end (tail) of the LRU list.
|
||||||
|
* After this, the size of the range is added to @lru_count
|
||||||
|
*/
|
||||||
static inline void lru_add(struct ashmem_range *range)
|
static inline void lru_add(struct ashmem_range *range)
|
||||||
{
|
{
|
||||||
list_add_tail(&range->lru, &ashmem_lru_list);
|
list_add_tail(&range->lru, &ashmem_lru_list);
|
||||||
lru_count += range_size(range);
|
lru_count += range_size(range);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* lru_del() - Removes a range of memory from the LRU list
|
||||||
|
* @range: The memory range being removed
|
||||||
|
*
|
||||||
|
* The range is first deleted from the LRU list.
|
||||||
|
* After this, the size of the range is removed from @lru_count
|
||||||
|
*/
|
||||||
static inline void lru_del(struct ashmem_range *range)
|
static inline void lru_del(struct ashmem_range *range)
|
||||||
{
|
{
|
||||||
list_del(&range->lru);
|
list_del(&range->lru);
|
||||||
lru_count -= range_size(range);
|
lru_count -= range_size(range);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/**
|
||||||
* range_alloc - allocate and initialize a new ashmem_range structure
|
* range_alloc() - Allocates and initializes a new ashmem_range structure
|
||||||
|
* @asma: The associated ashmem_area
|
||||||
|
* @prev_range: The previous ashmem_range in the sorted asma->unpinned list
|
||||||
|
* @purged: Initial purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
|
||||||
|
* @start: The starting page (inclusive)
|
||||||
|
* @end: The ending page (inclusive)
|
||||||
*
|
*
|
||||||
* 'asma' - associated ashmem_area
|
* This function is protected by ashmem_mutex.
|
||||||
* 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list
|
|
||||||
* 'purged' - initial purge value (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
|
|
||||||
* 'start' - starting page, inclusive
|
|
||||||
* 'end' - ending page, inclusive
|
|
||||||
*
|
*
|
||||||
* Caller must hold ashmem_mutex.
|
* Return: 0 if successful, or -ENOMEM if there is an error
|
||||||
*/
|
*/
|
||||||
static int range_alloc(struct ashmem_area *asma,
|
static int range_alloc(struct ashmem_area *asma,
|
||||||
struct ashmem_range *prev_range, unsigned int purged,
|
struct ashmem_range *prev_range, unsigned int purged,
|
||||||
@ -151,6 +184,10 @@ static int range_alloc(struct ashmem_area *asma,
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* range_del() - Deletes and deallocates an ashmem_range structure
|
||||||
|
* @range: The associated ashmem_range that has previously been allocated
|
||||||
|
*/
|
||||||
static void range_del(struct ashmem_range *range)
|
static void range_del(struct ashmem_range *range)
|
||||||
{
|
{
|
||||||
list_del(&range->unpinned);
|
list_del(&range->unpinned);
|
||||||
|
Loading…
Reference in New Issue
Block a user