Mirror of https://github.com/torvalds/linux.git (synced 2024-12-20 01:52:13 +00:00)
staging: tidspbridge: set7 remove hungarian from structs
Hungarian notation will be removed from the elements inside structures; the following variables will be renamed:

Original:               Replacement:
ul_gpp_phys             gpp_phys
ul_gpp_read_pointer     gpp_read_pointer
ul_gpp_size             gpp_size
ul_gpp_va               gpp_va
ul_heap_size            heap_size
ul_internal_mem_size    internal_mem_size
ul_in_use_cnt           in_use_cnt
ul_len_max_free_block   len_max_free_block
ul_max                  max
ul_min_block_size       min_block_size
ul_min                  min
ul_mpu_addr             mpu_addr
ul_n_bytes              bytes
ul_num_alloc_blocks     num_alloc_blocks
ul_number_bytes         number_bytes
ul_num_chnls            num_chnls
ul_num_free_blocks      num_free_blocks
ul_num_gppsm_segs       num_gppsm_segs
ul_pos                  pos
ul_reserved             reserved

Signed-off-by: Rene Sapiens <rene.sapiens@ti.com>
Signed-off-by: Armando Uribe <x0095078@ti.com>
Signed-off-by: Omar Ramirez Luna <omar.ramirez@ti.com>
parent dab7f7fee0
commit 6c66e948d2
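For illustration, the renames are purely mechanical: member types and struct layout are untouched, only the Hungarian "ul_" prefix is dropped (a couple of names are simplified further, e.g. ul_n_bytes becomes bytes), and every user of each member is updated in the same patch. A minimal standalone sketch of the pattern, using uint32_t in place of the kernel's u32 and struct mgr_tlbentry from the diff below (the _old/_new suffixes exist only for this side-by-side illustration):

#include <stdint.h>

/* Before: Hungarian-prefixed member name */
struct mgr_tlbentry_old {
	uint32_t dsp_virt;    /* DSP virtual address */
	uint32_t ul_gpp_phys; /* GPP physical address */
};

/* After: identical layout, "ul_" prefix dropped */
struct mgr_tlbentry_new {
	uint32_t dsp_virt;    /* DSP virtual address */
	uint32_t gpp_phys;    /* GPP physical address */
};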
@@ -118,9 +118,9 @@ struct io_mgr {
 u32 ul_trace_buffer_begin; /* Trace message start address */
 u32 ul_trace_buffer_end; /* Trace message end address */
 u32 ul_trace_buffer_current; /* Trace message current address */
-u32 ul_gpp_read_pointer; /* GPP Read pointer to Trace buffer */
+u32 gpp_read_pointer; /* GPP Read pointer to Trace buffer */
 u8 *pmsg;
-u32 ul_gpp_va;
+u32 gpp_va;
 u32 dsp_va;
 #endif
 /* IO Dpc */
@@ -532,7 +532,7 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 * This is the virtual uncached ioremapped
 * address!!!
 */
-ae_proc[ndx].ul_gpp_va = gpp_va_curr;
+ae_proc[ndx].gpp_va = gpp_va_curr;
 ae_proc[ndx].dsp_va =
 va_curr / hio_mgr->word_size;
 ae_proc[ndx].ul_size = page_size[i];
@@ -542,7 +542,7 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 dev_dbg(bridge, "shm MMU TLB entry PA %x"
 " VA %x DSP_VA %x Size %x\n",
 ae_proc[ndx].gpp_pa,
-ae_proc[ndx].ul_gpp_va,
+ae_proc[ndx].gpp_va,
 ae_proc[ndx].dsp_va *
 hio_mgr->word_size, page_size[i]);
 ndx++;
@@ -557,7 +557,7 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 "shm MMU PTE entry PA %x"
 " VA %x DSP_VA %x Size %x\n",
 ae_proc[ndx].gpp_pa,
-ae_proc[ndx].ul_gpp_va,
+ae_proc[ndx].gpp_va,
 ae_proc[ndx].dsp_va *
 hio_mgr->word_size, page_size[i]);
 if (status)
@@ -580,12 +580,12 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 * should not conflict with shm entries on MPU or DSP side.
 */
 for (i = 3; i < 7 && ndx < BRDIOCTL_NUMOFMMUTLB; i++) {
-if (hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys == 0)
+if (hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys == 0)
 continue;
 
-if ((hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys >
+if ((hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys >
 ul_gpp_pa - 0x100000
-&& hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys <=
+&& hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys <=
 ul_gpp_pa + ul_seg_size)
 || (hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt >
 ul_dsp_va - 0x100000 / hio_mgr->word_size
@@ -595,7 +595,7 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 "CDB MMU entry %d conflicts with "
 "shm.\n\tCDB: GppPa %x, DspVa %x.\n\tSHM: "
 "GppPa %x, DspVa %x, Bytes %x.\n", i,
-hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys,
+hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys,
 hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt,
 ul_gpp_pa, ul_dsp_va, ul_seg_size);
 status = -EPERM;
@@ -606,8 +606,8 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 dsp_virt;
 ae_proc[ndx].gpp_pa =
 hio_mgr->ext_proc_info.ty_tlb[i].
-ul_gpp_phys;
-ae_proc[ndx].ul_gpp_va = 0;
+gpp_phys;
+ae_proc[ndx].gpp_va = 0;
 /* 1 MB */
 ae_proc[ndx].ul_size = 0x100000;
 dev_dbg(bridge, "shm MMU entry PA %x "
@@ -618,7 +618,7 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 status = hio_mgr->intf_fxns->brd_mem_map
 (hio_mgr->hbridge_context,
 hio_mgr->ext_proc_info.ty_tlb[i].
-ul_gpp_phys,
+gpp_phys,
 hio_mgr->ext_proc_info.ty_tlb[i].
 dsp_virt, 0x100000, map_attrs,
 NULL);
@@ -649,7 +649,7 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) {
 ae_proc[i].dsp_va = 0;
 ae_proc[i].gpp_pa = 0;
-ae_proc[i].ul_gpp_va = 0;
+ae_proc[i].gpp_va = 0;
 ae_proc[i].ul_size = 0;
 }
 /*
@@ -657,14 +657,14 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 * to the virtual uncached ioremapped address of shm reserved
 * on MPU.
 */
-hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys =
+hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys =
 (ul_gpp_va + ul_seg1_size + ul_pad_size);
 
 /*
 * Need shm Phys addr. IO supports only one DSP for now:
 * num_procs = 1.
 */
-if (!hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys || num_procs != 1) {
+if (!hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys || num_procs != 1) {
 status = -EFAULT;
 goto func_end;
 } else {
@@ -688,7 +688,7 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 ae_proc);
 if (status)
 goto func_end;
-ul_shm_base = hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys;
+ul_shm_base = hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys;
 ul_shm_base += ul_shm_base_offset;
 ul_shm_base = (u32) MEM_LINEAR_ADDRESS((void *)ul_shm_base,
 ul_mem_length);
@@ -740,7 +740,7 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 goto func_end;
 }
 
-hio_mgr->ul_gpp_read_pointer = hio_mgr->ul_trace_buffer_begin =
+hio_mgr->gpp_read_pointer = hio_mgr->ul_trace_buffer_begin =
 (ul_gpp_va + ul_seg1_size + ul_pad_size) +
 (hio_mgr->ul_trace_buffer_begin - ul_dsp_va);
 /* Get the end address of trace buffer */
@@ -772,7 +772,7 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 status = -ENOMEM;
 
 hio_mgr->dsp_va = ul_dsp_va;
-hio_mgr->ul_gpp_va = (ul_gpp_va + ul_seg1_size + ul_pad_size);
+hio_mgr->gpp_va = (ul_gpp_va + ul_seg1_size + ul_pad_size);
 
 #endif
 func_end:
@@ -1541,7 +1541,7 @@ static int register_shm_segs(struct io_mgr *hio_mgr,
 goto func_end;
 }
 /* First TLB entry reserved for Bridge SM use. */
-ul_gpp_phys = hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys;
+ul_gpp_phys = hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys;
 /* Get size in bytes */
 ul_dsp_virt =
 hio_mgr->ext_proc_info.ty_tlb[0].dsp_virt *
@@ -1693,48 +1693,48 @@ void print_dsp_debug_trace(struct io_mgr *hio_mgr)
 ul_gpp_cur_pointer =
 *(u32 *) (hio_mgr->ul_trace_buffer_current);
 ul_gpp_cur_pointer =
-hio_mgr->ul_gpp_va + (ul_gpp_cur_pointer -
+hio_mgr->gpp_va + (ul_gpp_cur_pointer -
 hio_mgr->dsp_va);
 
 /* No new debug messages available yet */
-if (ul_gpp_cur_pointer == hio_mgr->ul_gpp_read_pointer) {
+if (ul_gpp_cur_pointer == hio_mgr->gpp_read_pointer) {
 break;
-} else if (ul_gpp_cur_pointer > hio_mgr->ul_gpp_read_pointer) {
+} else if (ul_gpp_cur_pointer > hio_mgr->gpp_read_pointer) {
 /* Continuous data */
 ul_new_message_length =
-ul_gpp_cur_pointer - hio_mgr->ul_gpp_read_pointer;
+ul_gpp_cur_pointer - hio_mgr->gpp_read_pointer;
 
 memcpy(hio_mgr->pmsg,
-(char *)hio_mgr->ul_gpp_read_pointer,
+(char *)hio_mgr->gpp_read_pointer,
 ul_new_message_length);
 hio_mgr->pmsg[ul_new_message_length] = '\0';
 /*
 * Advance the GPP trace pointer to DSP current
 * pointer.
 */
-hio_mgr->ul_gpp_read_pointer += ul_new_message_length;
+hio_mgr->gpp_read_pointer += ul_new_message_length;
 /* Print the trace messages */
 pr_info("DSPTrace: %s\n", hio_mgr->pmsg);
-} else if (ul_gpp_cur_pointer < hio_mgr->ul_gpp_read_pointer) {
+} else if (ul_gpp_cur_pointer < hio_mgr->gpp_read_pointer) {
 /* Handle trace buffer wraparound */
 memcpy(hio_mgr->pmsg,
-(char *)hio_mgr->ul_gpp_read_pointer,
+(char *)hio_mgr->gpp_read_pointer,
 hio_mgr->ul_trace_buffer_end -
-hio_mgr->ul_gpp_read_pointer);
+hio_mgr->gpp_read_pointer);
 ul_new_message_length =
 ul_gpp_cur_pointer - hio_mgr->ul_trace_buffer_begin;
 memcpy(&hio_mgr->pmsg[hio_mgr->ul_trace_buffer_end -
-hio_mgr->ul_gpp_read_pointer],
+hio_mgr->gpp_read_pointer],
 (char *)hio_mgr->ul_trace_buffer_begin,
 ul_new_message_length);
 hio_mgr->pmsg[hio_mgr->ul_trace_buffer_end -
-hio_mgr->ul_gpp_read_pointer +
+hio_mgr->gpp_read_pointer +
 ul_new_message_length] = '\0';
 /*
 * Advance the GPP trace pointer to DSP current
 * pointer.
 */
-hio_mgr->ul_gpp_read_pointer =
+hio_mgr->gpp_read_pointer =
 hio_mgr->ul_trace_buffer_begin +
 ul_new_message_length;
 /* Print the trace messages */
@@ -406,7 +406,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 ul_shm_offset_virt =
 ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
 /* Kernel logical address */
-ul_shm_base = dev_context->atlb_entry[0].ul_gpp_va + ul_shm_offset_virt;
+ul_shm_base = dev_context->atlb_entry[0].gpp_va + ul_shm_offset_virt;
 
 DBC_ASSERT(ul_shm_base != 0);
 /* 2nd wd is used as sync field */
@@ -137,7 +137,7 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
 dev_context->atlb_entry[0].dsp_va * DSPWORDSIZE;
 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
 dw_ext_prog_virt_mem =
-dev_context->atlb_entry[0].ul_gpp_va;
+dev_context->atlb_entry[0].gpp_va;
 
 if (!trace_read) {
 ul_shm_offset_virt =
@@ -337,7 +337,7 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
 ul_shm_base_virt - ul_tlb_base_virt;
 if (trace_load) {
 dw_ext_prog_virt_mem =
-dev_context->atlb_entry[0].ul_gpp_va;
+dev_context->atlb_entry[0].gpp_va;
 } else {
 dw_ext_prog_virt_mem = host_res->mem_base[1];
 dw_ext_prog_virt_mem +=
@@ -81,7 +81,7 @@ extern void *cmm_calloc_buf(struct cmm_object *hcmm_mgr,
 * Requires:
 * cmm_init(void) called.
 * ph_cmm_mgr != NULL.
-* mgr_attrts->ul_min_block_size >= 4 bytes.
+* mgr_attrts->min_block_size >= 4 bytes.
 * Ensures:
 *
 */
@@ -23,13 +23,13 @@
 /* Cmm attributes used in cmm_create() */
 struct cmm_mgrattrs {
 /* Minimum SM allocation; default 32 bytes. */
-u32 ul_min_block_size;
+u32 min_block_size;
 };
 
 /* Attributes for CMM_AllocBuf() & CMM_AllocDesc() */
 struct cmm_attrs {
 u32 ul_seg_id; /* 1,2... are SM segments. 0 is not. */
-u32 alignment; /* 0,1,2,4....ul_min_block_size */
+u32 alignment; /* 0,1,2,4....min_block_size */
 };
 
 /*
@@ -55,11 +55,11 @@ struct cmm_seginfo {
 /* Total size in bytes of segment: DSP+GPP */
 u32 ul_total_seg_size;
 u32 gpp_base_pa; /* Start Phys addr of Gpp SM seg */
-u32 ul_gpp_size; /* Size of Gpp SM seg in bytes */
+u32 gpp_size; /* Size of Gpp SM seg in bytes */
 u32 dsp_base_va; /* DSP virt base byte address */
 u32 dsp_size; /* DSP seg size in bytes */
 /* # of current GPP allocations from this segment */
-u32 ul_in_use_cnt;
+u32 in_use_cnt;
 u32 seg_base_va; /* Start Virt address of SM seg */
 
 };
@@ -67,11 +67,11 @@ struct cmm_seginfo {
 /* CMM useful information */
 struct cmm_info {
 /* # of SM segments registered with this Cmm. */
-u32 ul_num_gppsm_segs;
+u32 num_gppsm_segs;
 /* Total # of allocations outstanding for CMM */
 u32 ul_total_in_use_cnt;
 /* Min SM block size allocation from cmm_create() */
-u32 ul_min_block_size;
+u32 min_block_size;
 /* Info per registered SM segment. */
 struct cmm_seginfo seg_info[CMM_MAXGPPSEGS];
 };
@@ -210,9 +210,9 @@ enum dsp_flushtype {
 struct dsp_memstat {
 u32 ul_size;
 u32 ul_total_free_size;
-u32 ul_len_max_free_block;
-u32 ul_num_free_blocks;
-u32 ul_num_alloc_blocks;
+u32 len_max_free_block;
+u32 num_free_blocks;
+u32 num_alloc_blocks;
 };
 
 /* Processor Load information Values */
@@ -276,7 +276,7 @@ struct dsp_streamconnect {
 };
 
 struct dsp_nodeprofs {
-u32 ul_heap_size;
+u32 heap_size;
 };
 
 /* The dsp_ndbprops structure reports the attributes of a node */
@@ -358,7 +358,7 @@ struct dsp_processorinfo {
 int processor_family;
 int processor_type;
 u32 clock_rate;
-u32 ul_internal_mem_size;
+u32 internal_mem_size;
 u32 external_mem_size;
 u32 processor_id;
 int ty_running_rtos;
@@ -425,7 +425,7 @@ struct dsp_streaminfo {
 u32 cb_struct;
 u32 number_bufs_allowed;
 u32 number_bufs_in_stream;
-u32 ul_number_bytes;
+u32 number_bytes;
 void *sync_object_handle;
 enum dsp_streamstate ss_stream_state;
 };
@@ -58,7 +58,7 @@ struct bridge_ioctl_extproc {
 u32 dsp_va; /* DSP virtual address */
 u32 gpp_pa; /* GPP physical address */
 /* GPP virtual address. __va does not work for ioremapped addresses */
-u32 ul_gpp_va;
+u32 gpp_va;
 u32 ul_size; /* Size of the mapped memory in bytes */
 enum hw_endianism_t endianism;
 enum hw_mmu_mixed_size_t mixed_mode;
@@ -29,7 +29,7 @@ struct mgr_object;
 
 struct mgr_tlbentry {
 u32 dsp_virt; /* DSP virtual address */
-u32 ul_gpp_phys; /* GPP physical address */
+u32 gpp_phys; /* GPP physical address */
 };
 
 /*
@@ -98,7 +98,7 @@ struct cmm_object {
 */
 struct mutex cmm_lock; /* Lock to access cmm mgr */
 struct list_head node_free_list; /* Free list of memory nodes */
-u32 ul_min_block_size; /* Min SM block; default 16 bytes */
+u32 min_block_size; /* Min SM block; default 16 bytes */
 u32 page_size; /* Memory Page size (1k/4k) */
 /* GPP SM segment ptrs */
 struct cmm_allocator *pa_gppsm_seg_tab[CMM_MAXGPPSEGS];
@@ -106,7 +106,7 @@ struct cmm_object {
 
 /* Default CMM Mgr attributes */
 static struct cmm_mgrattrs cmm_dfltmgrattrs = {
-/* ul_min_block_size, min block size(bytes) allocated by cmm mgr */
+/* min_block_size, min block size(bytes) allocated by cmm mgr */
 16
 };
 
@@ -185,17 +185,17 @@ void *cmm_calloc_buf(struct cmm_object *hcmm_mgr, u32 usize,
 /* get the allocator object for this segment id */
 allocator =
 get_allocator(cmm_mgr_obj, pattrs->ul_seg_id);
-/* keep block size a multiple of ul_min_block_size */
+/* keep block size a multiple of min_block_size */
 usize =
-((usize - 1) & ~(cmm_mgr_obj->ul_min_block_size -
+((usize - 1) & ~(cmm_mgr_obj->min_block_size -
 1))
-+ cmm_mgr_obj->ul_min_block_size;
++ cmm_mgr_obj->min_block_size;
 mutex_lock(&cmm_mgr_obj->cmm_lock);
 pnode = get_free_block(allocator, usize);
 }
 if (pnode) {
 delta_size = (pnode->ul_size - usize);
-if (delta_size >= cmm_mgr_obj->ul_min_block_size) {
+if (delta_size >= cmm_mgr_obj->min_block_size) {
 /* create a new block with the leftovers and
 * add to freelist */
 new_node =
@@ -257,9 +257,9 @@ int cmm_create(struct cmm_object **ph_cmm_mgr,
 mgr_attrts = &cmm_dfltmgrattrs; /* set defaults */
 
 /* 4 bytes minimum */
-DBC_ASSERT(mgr_attrts->ul_min_block_size >= 4);
+DBC_ASSERT(mgr_attrts->min_block_size >= 4);
 /* save away smallest block allocation for this cmm mgr */
-cmm_obj->ul_min_block_size = mgr_attrts->ul_min_block_size;
+cmm_obj->min_block_size = mgr_attrts->min_block_size;
 cmm_obj->page_size = PAGE_SIZE;
 
 /* create node free list */
@@ -426,25 +426,25 @@ int cmm_get_info(struct cmm_object *hcmm_mgr,
 return status;
 }
 mutex_lock(&cmm_mgr_obj->cmm_lock);
-cmm_info_obj->ul_num_gppsm_segs = 0; /* # of SM segments */
+cmm_info_obj->num_gppsm_segs = 0; /* # of SM segments */
 /* Total # of outstanding alloc */
 cmm_info_obj->ul_total_in_use_cnt = 0;
 /* min block size */
-cmm_info_obj->ul_min_block_size = cmm_mgr_obj->ul_min_block_size;
+cmm_info_obj->min_block_size = cmm_mgr_obj->min_block_size;
 /* check SM memory segments */
 for (ul_seg = 1; ul_seg <= CMM_MAXGPPSEGS; ul_seg++) {
 /* get the allocator object for this segment id */
 altr = get_allocator(cmm_mgr_obj, ul_seg);
 if (!altr)
 continue;
-cmm_info_obj->ul_num_gppsm_segs++;
+cmm_info_obj->num_gppsm_segs++;
 cmm_info_obj->seg_info[ul_seg - 1].seg_base_pa =
 altr->shm_base - altr->dsp_size;
 cmm_info_obj->seg_info[ul_seg - 1].ul_total_seg_size =
 altr->dsp_size + altr->ul_sm_size;
 cmm_info_obj->seg_info[ul_seg - 1].gpp_base_pa =
 altr->shm_base;
-cmm_info_obj->seg_info[ul_seg - 1].ul_gpp_size =
+cmm_info_obj->seg_info[ul_seg - 1].gpp_size =
 altr->ul_sm_size;
 cmm_info_obj->seg_info[ul_seg - 1].dsp_base_va =
 altr->dsp_base;
@@ -452,11 +452,11 @@ int cmm_get_info(struct cmm_object *hcmm_mgr,
 altr->dsp_size;
 cmm_info_obj->seg_info[ul_seg - 1].seg_base_va =
 altr->vm_base - altr->dsp_size;
-cmm_info_obj->seg_info[ul_seg - 1].ul_in_use_cnt = 0;
+cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt = 0;
 
 list_for_each_entry(curr, &altr->in_use_list, link) {
 cmm_info_obj->ul_total_in_use_cnt++;
-cmm_info_obj->seg_info[ul_seg - 1].ul_in_use_cnt++;
+cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt++;
 }
 }
 mutex_unlock(&cmm_mgr_obj->cmm_lock);
@@ -524,7 +524,7 @@ int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
 }
 
 /* Check if input ul_size is big enough to alloc at least one block */
-if (ul_size < cmm_mgr_obj->ul_min_block_size) {
+if (ul_size < cmm_mgr_obj->min_block_size) {
 status = -EINVAL;
 goto func_end;
 }
@@ -123,7 +123,7 @@ struct dbll_library_obj {
 u32 open_ref; /* Number of times opened */
 u32 load_ref; /* Number of times loaded */
 struct gh_t_hash_tab *sym_tab; /* Hash table of symbols */
-u32 ul_pos;
+u32 pos;
 };
 
 /*
@@ -398,7 +398,7 @@ int dbll_get_sect(struct dbll_library_obj *lib, char *name, u32 *paddr,
 
 } else {
 (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp,
-zl_lib->ul_pos,
+zl_lib->pos,
 SEEK_SET);
 }
 } else {
@@ -522,7 +522,7 @@ int dbll_load(struct dbll_library_obj *lib, dbll_flags flags,
 
 }
 if (!status) {
-zl_lib->ul_pos = (*(zl_lib->target_obj->attrs.ftell))
+zl_lib->pos = (*(zl_lib->target_obj->attrs.ftell))
 (zl_lib->fp);
 /* Reset file cursor */
 (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp,
@@ -599,7 +599,7 @@ int dbll_open(struct dbll_tar_obj *target, char *file, dbll_flags flags,
 if (zl_lib == NULL) {
 status = -ENOMEM;
 } else {
-zl_lib->ul_pos = 0;
+zl_lib->pos = 0;
 /* Increment ref count to allow close on failure
 * later on */
 zl_lib->open_ref++;
@@ -649,7 +649,7 @@ int dbll_open(struct dbll_tar_obj *target, char *file, dbll_flags flags,
 if (!status && zl_lib->fp == NULL)
 status = dof_open(zl_lib);
 
-zl_lib->ul_pos = (*(zl_lib->target_obj->attrs.ftell)) (zl_lib->fp);
+zl_lib->pos = (*(zl_lib->target_obj->attrs.ftell)) (zl_lib->fp);
 (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, (long)0, SEEK_SET);
 /* Create a hash table for symbols if flag is set */
 if (zl_lib->sym_tab != NULL || !(flags & DBLL_SYMB))
@@ -738,7 +738,7 @@ int dbll_read_sect(struct dbll_library_obj *lib, char *name,
 
 } else {
 (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp,
-zl_lib->ul_pos,
+zl_lib->pos,
 SEEK_SET);
 }
 } else {
@@ -1253,7 +1253,7 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
 /* Heap Size for the node */
 gen_obj->obj_data.node_obj.
 ndb_props.node_profiles[i].
-ul_heap_size = atoi(token);
+heap_size = atoi(token);
 }
 }
 }
@@ -1285,7 +1285,7 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
 gen_obj->obj_data.proc_info.clock_rate = atoi(token);
 token = strsep(&psz_cur, seps);
 
-gen_obj->obj_data.proc_info.ul_internal_mem_size = atoi(token);
+gen_obj->obj_data.proc_info.internal_mem_size = atoi(token);
 token = strsep(&psz_cur, seps);
 
 gen_obj->obj_data.proc_info.external_mem_size = atoi(token);
@@ -1308,7 +1308,7 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
 for (entry_id = 0; entry_id < 7; entry_id++) {
 token = strsep(&psz_cur, seps);
 gen_obj->obj_data.ext_proc_obj.ty_tlb[entry_id].
-ul_gpp_phys = atoi(token);
+gpp_phys = atoi(token);
 
 token = strsep(&psz_cur, seps);
 gen_obj->obj_data.ext_proc_obj.ty_tlb[entry_id].
@@ -146,7 +146,7 @@ struct node_mgr {
 struct msg_mgr *msg_mgr_obj;
 
 /* Processor properties needed by Node Dispatcher */
-u32 ul_num_chnls; /* Total number of channels */
+u32 num_chnls; /* Total number of channels */
 u32 chnl_offset; /* Offset of chnl ids rsvd for RMS */
 u32 chnl_buf_size; /* Buffer size for data to RMS */
 int proc_family; /* eg, 5000 */
@@ -1003,7 +1003,7 @@ int node_connect(struct node_object *node1, u32 stream1,
 set_bit(chnl_id, hnode_mgr->dma_chnl_map);
 /* dma chans are 2nd transport chnl set
 * ids(e.g. 16-31) */
-chnl_id = chnl_id + hnode_mgr->ul_num_chnls;
+chnl_id = chnl_id + hnode_mgr->num_chnls;
 }
 break;
 case STRMMODE_ZEROCOPY:
@@ -1014,7 +1014,7 @@ int node_connect(struct node_object *node1, u32 stream1,
 /* zero-copy chans are 3nd transport set
 * (e.g. 32-47) */
 chnl_id = chnl_id +
-(2 * hnode_mgr->ul_num_chnls);
+(2 * hnode_mgr->num_chnls);
 }
 break;
 case STRMMODE_PROCCOPY:
@@ -2723,15 +2723,15 @@ static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream)
 set_bit(stream.dev_id, hnode_mgr->pipe_done_map);
 }
 } else if (stream.type == HOSTCONNECT) {
-if (stream.dev_id < hnode_mgr->ul_num_chnls) {
+if (stream.dev_id < hnode_mgr->num_chnls) {
 clear_bit(stream.dev_id, hnode_mgr->chnl_map);
-} else if (stream.dev_id < (2 * hnode_mgr->ul_num_chnls)) {
+} else if (stream.dev_id < (2 * hnode_mgr->num_chnls)) {
 /* dsp-dma */
-clear_bit(stream.dev_id - (1 * hnode_mgr->ul_num_chnls),
+clear_bit(stream.dev_id - (1 * hnode_mgr->num_chnls),
 hnode_mgr->dma_chnl_map);
-} else if (stream.dev_id < (3 * hnode_mgr->ul_num_chnls)) {
+} else if (stream.dev_id < (3 * hnode_mgr->num_chnls)) {
 /* zero-copy */
-clear_bit(stream.dev_id - (2 * hnode_mgr->ul_num_chnls),
+clear_bit(stream.dev_id - (2 * hnode_mgr->num_chnls),
 hnode_mgr->zc_chnl_map);
 }
 }
@@ -2904,7 +2904,7 @@ static int get_proc_props(struct node_mgr *hnode_mgr,
 return -EPERM;
 hnode_mgr->chnl_offset = host_res->chnl_offset;
 hnode_mgr->chnl_buf_size = host_res->chnl_buf_size;
-hnode_mgr->ul_num_chnls = host_res->num_chnls;
+hnode_mgr->num_chnls = host_res->num_chnls;
 
 /*
 * PROC will add an API to get dsp_processorinfo.
@@ -371,17 +371,17 @@ bool rmm_stat(struct rmm_target_obj *target, enum dsp_memtype segid,
 /* ul_size */
 mem_stat_buf->ul_size = target->seg_tab[segid].length;
 
-/* ul_num_free_blocks */
-mem_stat_buf->ul_num_free_blocks = free_blocks;
+/* num_free_blocks */
+mem_stat_buf->num_free_blocks = free_blocks;
 
 /* ul_total_free_size */
 mem_stat_buf->ul_total_free_size = total_free_size;
 
-/* ul_len_max_free_block */
-mem_stat_buf->ul_len_max_free_block = max_free_size;
+/* len_max_free_block */
+mem_stat_buf->len_max_free_block = max_free_size;
 
-/* ul_num_alloc_blocks */
-mem_stat_buf->ul_num_alloc_blocks =
+/* num_alloc_blocks */
+mem_stat_buf->num_alloc_blocks =
 target->seg_tab[segid].number;
 
 ret = true;
@@ -71,7 +71,7 @@ struct strm_object {
 u32 utimeout;
 u32 num_bufs; /* Max # of bufs allowed in stream */
 u32 un_bufs_in_strm; /* Current # of bufs in stream */
-u32 ul_n_bytes; /* bytes transferred since idled */
+u32 bytes; /* bytes transferred since idled */
 /* STREAM_IDLE, STREAM_READY, ... */
 enum dsp_streamstate strm_state;
 void *user_event; /* Saved for strm_get_info() */
@@ -341,7 +341,7 @@ int strm_get_info(struct strm_object *stream_obj,
 stream_info->user_strm->number_bufs_in_stream = chnl_info_obj.cio_cs +
 chnl_info_obj.cio_reqs;
 /* # of bytes transferred since last call to DSPStream_Idle() */
-stream_info->user_strm->ul_number_bytes = chnl_info_obj.bytes_tx;
+stream_info->user_strm->number_bytes = chnl_info_obj.bytes_tx;
 stream_info->user_strm->sync_object_handle = chnl_info_obj.event_obj;
 /* Determine stream state based on channel state and info */
 if (chnl_info_obj.state & CHNL_STATEEOS) {