cxgb4: collect on-chip memory information
Collect memory layout of various on-chip memory regions.

Move code for collecting on-chip memory information to cudbg_lib.c and
update cxgb4_debugfs.c to use the common function. Also include
cudbg_entity.h before cudbg_lib.h to avoid adding cudbg entity structure
forward declarations in cudbg_lib.h.

Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
Signed-off-by: Ganesh Goudar <ganeshgr@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 123e25c4a5
parent 62fd8b189e
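Before the diff, a minimal sketch of how the new shared helper is meant to be consumed. Everything it touches (cudbg_fill_meminfo(), struct cudbg_meminfo, cudbg_region[]) is introduced by this patch; the wrapper name dump_meminfo_example() and the exact seq_file formatting are illustrative only, the real caller being the updated meminfo_show() in cxgb4_debugfs.c further down.

/* Illustrative consumer of cudbg_fill_meminfo(); assumes the cxgb4 driver
 * headers plus cudbg_entity.h and cudbg_lib.h are already included.
 * dump_meminfo_example() is a hypothetical name, not part of the patch.
 */
static int dump_meminfo_example(struct seq_file *seq, struct adapter *adap)
{
        struct cudbg_meminfo meminfo;
        int i, rc;

        memset(&meminfo, 0, sizeof(struct cudbg_meminfo));
        rc = cudbg_fill_meminfo(adap, &meminfo); /* reads MA/TP/ULP/CIM registers */
        if (rc) /* e.g. CUDBG_STATUS_ENTITY_NOT_FOUND when no memory is enabled */
                return -ENXIO;

        /* Available memories (EDC0/EDC1/MC...), already sorted by base address */
        for (i = 0; i < meminfo.avail_c; i++)
                seq_printf(seq, "avail idx %u: 0x%x - 0x%x\n",
                           meminfo.avail[i].idx, meminfo.avail[i].base,
                           meminfo.avail[i].limit - 1);

        /* On-chip regions; idx indexes the cudbg_region[] name table */
        for (i = 0; i < meminfo.mem_c; i++) {
                if (meminfo.mem[i].idx >= ARRAY_SIZE(cudbg_region))
                        continue; /* skip address-space holes */
                seq_printf(seq, "%s 0x%x\n",
                           cudbg_region[meminfo.mem[i].idx],
                           meminfo.mem[i].base);
        }
        return 0;
}

Both the debugfs path and the cudbg dump path (cudbg_collect_meminfo() below) go through this single helper, which is the point of the refactor.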
@@ -87,6 +87,41 @@ struct cudbg_tp_la {
        u8 data[0];
};

static const char * const cudbg_region[] = {
        "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
        "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
        "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
        "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
        "RQUDP region:", "PBL region:", "TXPBL region:",
        "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
        "On-chip queues:"
};

struct cudbg_mem_desc {
        u32 base;
        u32 limit;
        u32 idx;
};

struct cudbg_meminfo {
        struct cudbg_mem_desc avail[4];
        struct cudbg_mem_desc mem[ARRAY_SIZE(cudbg_region) + 3];
        u32 avail_c;
        u32 mem_c;
        u32 up_ram_lo;
        u32 up_ram_hi;
        u32 up_extmem2_lo;
        u32 up_extmem2_hi;
        u32 rx_pages_data[3];
        u32 tx_pages_data[4];
        u32 p_structs;
        u32 reserved[12];
        u32 port_used[4];
        u32 port_alloc[4];
        u32 loopback_used[NCHAN];
        u32 loopback_alloc[NCHAN];
};

struct cudbg_cim_pif_la {
        int size;
        u8 data[0];
@@ -56,6 +56,7 @@ enum cudbg_dbg_entity_type {
        CUDBG_SGE_INDIRECT = 37,
        CUDBG_ULPRX_LA = 41,
        CUDBG_TP_LA = 43,
        CUDBG_MEMINFO = 44,
        CUDBG_CIM_PIF_LA = 45,
        CUDBG_CLK = 46,
        CUDBG_CIM_OBQ_RXQ0 = 47,
@@ -15,12 +15,14 @@
 *
 */

#include <linux/sort.h>

#include "t4_regs.h"
#include "cxgb4.h"
#include "cudbg_if.h"
#include "cudbg_lib_common.h"
#include "cudbg_lib.h"
#include "cudbg_entity.h"
#include "cudbg_lib.h"

static void cudbg_write_and_release_buff(struct cudbg_buffer *pin_buff,
                                         struct cudbg_buffer *dbg_buff)
@@ -84,6 +86,266 @@ static int cudbg_read_vpd_reg(struct adapter *padap, u32 addr, u32 len,
        return 0;
}

static int cudbg_mem_desc_cmp(const void *a, const void *b)
{
        return ((const struct cudbg_mem_desc *)a)->base -
               ((const struct cudbg_mem_desc *)b)->base;
}

int cudbg_fill_meminfo(struct adapter *padap,
                       struct cudbg_meminfo *meminfo_buff)
{
        struct cudbg_mem_desc *md;
        u32 lo, hi, used, alloc;
        int n, i;

        memset(meminfo_buff->avail, 0,
               ARRAY_SIZE(meminfo_buff->avail) *
               sizeof(struct cudbg_mem_desc));
        memset(meminfo_buff->mem, 0,
               (ARRAY_SIZE(cudbg_region) + 3) * sizeof(struct cudbg_mem_desc));
        md = meminfo_buff->mem;

        for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) {
                meminfo_buff->mem[i].limit = 0;
                meminfo_buff->mem[i].idx = i;
        }

        /* Find and sort the populated memory ranges */
        i = 0;
        lo = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
        if (lo & EDRAM0_ENABLE_F) {
                hi = t4_read_reg(padap, MA_EDRAM0_BAR_A);
                meminfo_buff->avail[i].base =
                        cudbg_mbytes_to_bytes(EDRAM0_BASE_G(hi));
                meminfo_buff->avail[i].limit =
                        meminfo_buff->avail[i].base +
                        cudbg_mbytes_to_bytes(EDRAM0_SIZE_G(hi));
                meminfo_buff->avail[i].idx = 0;
                i++;
        }

        if (lo & EDRAM1_ENABLE_F) {
                hi = t4_read_reg(padap, MA_EDRAM1_BAR_A);
                meminfo_buff->avail[i].base =
                        cudbg_mbytes_to_bytes(EDRAM1_BASE_G(hi));
                meminfo_buff->avail[i].limit =
                        meminfo_buff->avail[i].base +
                        cudbg_mbytes_to_bytes(EDRAM1_SIZE_G(hi));
                meminfo_buff->avail[i].idx = 1;
                i++;
        }

        if (is_t5(padap->params.chip)) {
                if (lo & EXT_MEM0_ENABLE_F) {
                        hi = t4_read_reg(padap, MA_EXT_MEMORY0_BAR_A);
                        meminfo_buff->avail[i].base =
                                cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
                        meminfo_buff->avail[i].limit =
                                meminfo_buff->avail[i].base +
                                cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
                        meminfo_buff->avail[i].idx = 3;
                        i++;
                }

                if (lo & EXT_MEM1_ENABLE_F) {
                        hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
                        meminfo_buff->avail[i].base =
                                cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
                        meminfo_buff->avail[i].limit =
                                meminfo_buff->avail[i].base +
                                cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
                        meminfo_buff->avail[i].idx = 4;
                        i++;
                }
        } else {
                if (lo & EXT_MEM_ENABLE_F) {
                        hi = t4_read_reg(padap, MA_EXT_MEMORY_BAR_A);
                        meminfo_buff->avail[i].base =
                                cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
                        meminfo_buff->avail[i].limit =
                                meminfo_buff->avail[i].base +
                                cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
                        meminfo_buff->avail[i].idx = 2;
                        i++;
                }
        }

        if (!i) /* no memory available */
                return CUDBG_STATUS_ENTITY_NOT_FOUND;

        meminfo_buff->avail_c = i;
        sort(meminfo_buff->avail, i, sizeof(struct cudbg_mem_desc),
             cudbg_mem_desc_cmp, NULL);
        (md++)->base = t4_read_reg(padap, SGE_DBQ_CTXT_BADDR_A);
        (md++)->base = t4_read_reg(padap, SGE_IMSG_CTXT_BADDR_A);
        (md++)->base = t4_read_reg(padap, SGE_FLM_CACHE_BADDR_A);
        (md++)->base = t4_read_reg(padap, TP_CMM_TCB_BASE_A);
        (md++)->base = t4_read_reg(padap, TP_CMM_MM_BASE_A);
        (md++)->base = t4_read_reg(padap, TP_CMM_TIMER_BASE_A);
        (md++)->base = t4_read_reg(padap, TP_CMM_MM_RX_FLST_BASE_A);
        (md++)->base = t4_read_reg(padap, TP_CMM_MM_TX_FLST_BASE_A);
        (md++)->base = t4_read_reg(padap, TP_CMM_MM_PS_FLST_BASE_A);

        /* the next few have explicit upper bounds */
        md->base = t4_read_reg(padap, TP_PMM_TX_BASE_A);
        md->limit = md->base - 1 +
                    t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A) *
                    PMTXMAXPAGE_G(t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A));
        md++;

        md->base = t4_read_reg(padap, TP_PMM_RX_BASE_A);
        md->limit = md->base - 1 +
                    t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) *
                    PMRXMAXPAGE_G(t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A));
        md++;

        if (t4_read_reg(padap, LE_DB_CONFIG_A) & HASHEN_F) {
                if (CHELSIO_CHIP_VERSION(padap->params.chip) <= CHELSIO_T5) {
                        hi = t4_read_reg(padap, LE_DB_TID_HASHBASE_A) / 4;
                        md->base = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
                } else {
                        hi = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
                        md->base = t4_read_reg(padap,
                                               LE_DB_HASH_TBL_BASE_ADDR_A);
                }
                md->limit = 0;
        } else {
                md->base = 0;
                md->idx = ARRAY_SIZE(cudbg_region); /* hide it */
        }
        md++;

#define ulp_region(reg) do { \
        md->base = t4_read_reg(padap, ULP_ ## reg ## _LLIMIT_A);\
        (md++)->limit = t4_read_reg(padap, ULP_ ## reg ## _ULIMIT_A);\
} while (0)

        ulp_region(RX_ISCSI);
        ulp_region(RX_TDDP);
        ulp_region(TX_TPT);
        ulp_region(RX_STAG);
        ulp_region(RX_RQ);
        ulp_region(RX_RQUDP);
        ulp_region(RX_PBL);
        ulp_region(TX_PBL);
#undef ulp_region
        md->base = 0;
        md->idx = ARRAY_SIZE(cudbg_region);
        if (!is_t4(padap->params.chip)) {
                u32 fifo_size = t4_read_reg(padap, SGE_DBVFIFO_SIZE_A);
                u32 sge_ctrl = t4_read_reg(padap, SGE_CONTROL2_A);
                u32 size = 0;

                if (is_t5(padap->params.chip)) {
                        if (sge_ctrl & VFIFO_ENABLE_F)
                                size = DBVFIFO_SIZE_G(fifo_size);
                } else {
                        size = T6_DBVFIFO_SIZE_G(fifo_size);
                }

                if (size) {
                        md->base = BASEADDR_G(t4_read_reg(padap,
                                                          SGE_DBVFIFO_BADDR_A));
                        md->limit = md->base + (size << 2) - 1;
                }
        }

        md++;

        md->base = t4_read_reg(padap, ULP_RX_CTX_BASE_A);
        md->limit = 0;
        md++;
        md->base = t4_read_reg(padap, ULP_TX_ERR_TABLE_BASE_A);
        md->limit = 0;
        md++;

        md->base = padap->vres.ocq.start;
        if (padap->vres.ocq.size)
                md->limit = md->base + padap->vres.ocq.size - 1;
        else
                md->idx = ARRAY_SIZE(cudbg_region); /* hide it */
        md++;

        /* add any address-space holes, there can be up to 3 */
        for (n = 0; n < i - 1; n++)
                if (meminfo_buff->avail[n].limit <
                    meminfo_buff->avail[n + 1].base)
                        (md++)->base = meminfo_buff->avail[n].limit;

        if (meminfo_buff->avail[n].limit)
                (md++)->base = meminfo_buff->avail[n].limit;

        n = md - meminfo_buff->mem;
        meminfo_buff->mem_c = n;

        sort(meminfo_buff->mem, n, sizeof(struct cudbg_mem_desc),
             cudbg_mem_desc_cmp, NULL);

        lo = t4_read_reg(padap, CIM_SDRAM_BASE_ADDR_A);
        hi = t4_read_reg(padap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
        meminfo_buff->up_ram_lo = lo;
        meminfo_buff->up_ram_hi = hi;

        lo = t4_read_reg(padap, CIM_EXTMEM2_BASE_ADDR_A);
        hi = t4_read_reg(padap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
        meminfo_buff->up_extmem2_lo = lo;
        meminfo_buff->up_extmem2_hi = hi;

        lo = t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A);
        meminfo_buff->rx_pages_data[0] = PMRXMAXPAGE_G(lo);
        meminfo_buff->rx_pages_data[1] =
                t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) >> 10;
        meminfo_buff->rx_pages_data[2] = (lo & PMRXNUMCHN_F) ? 2 : 1;

        lo = t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A);
        hi = t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A);
        meminfo_buff->tx_pages_data[0] = PMTXMAXPAGE_G(lo);
        meminfo_buff->tx_pages_data[1] =
                hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
        meminfo_buff->tx_pages_data[2] =
                hi >= (1 << 20) ? 'M' : 'K';
        meminfo_buff->tx_pages_data[3] = 1 << PMTXNUMCHN_G(lo);

        meminfo_buff->p_structs = t4_read_reg(padap, TP_CMM_MM_MAX_PSTRUCT_A);

        for (i = 0; i < 4; i++) {
                if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
                        lo = t4_read_reg(padap,
                                         MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
                else
                        lo = t4_read_reg(padap, MPS_RX_PG_RSV0_A + i * 4);
                if (is_t5(padap->params.chip)) {
                        used = T5_USED_G(lo);
                        alloc = T5_ALLOC_G(lo);
                } else {
                        used = USED_G(lo);
                        alloc = ALLOC_G(lo);
                }
                meminfo_buff->port_used[i] = used;
                meminfo_buff->port_alloc[i] = alloc;
        }

        for (i = 0; i < padap->params.arch.nchan; i++) {
                if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
                        lo = t4_read_reg(padap,
                                         MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
                else
                        lo = t4_read_reg(padap, MPS_RX_PG_RSV4_A + i * 4);
                if (is_t5(padap->params.chip)) {
                        used = T5_USED_G(lo);
                        alloc = T5_ALLOC_G(lo);
                } else {
                        used = USED_G(lo);
                        alloc = ALLOC_G(lo);
                }
                meminfo_buff->loopback_used[i] = used;
                meminfo_buff->loopback_alloc[i] = alloc;
        }

        return 0;
}

int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
                           struct cudbg_buffer *dbg_buff,
                           struct cudbg_error *cudbg_err)
@@ -843,6 +1105,31 @@ int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
        return rc;
}

int cudbg_collect_meminfo(struct cudbg_init *pdbg_init,
                          struct cudbg_buffer *dbg_buff,
                          struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        struct cudbg_meminfo *meminfo_buff;
        int rc;

        rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_meminfo), &temp_buff);
        if (rc)
                return rc;

        meminfo_buff = (struct cudbg_meminfo *)temp_buff.data;
        rc = cudbg_fill_meminfo(padap, meminfo_buff);
        if (rc) {
                cudbg_err->sys_err = rc;
                cudbg_put_buff(&temp_buff, dbg_buff);
                return rc;
        }

        cudbg_write_and_release_buff(&temp_buff, dbg_buff);
        return rc;
}

int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
                             struct cudbg_buffer *dbg_buff,
                             struct cudbg_error *cudbg_err)
@@ -102,6 +102,9 @@ int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
                        struct cudbg_buffer *dbg_buff,
                        struct cudbg_error *cudbg_err);
int cudbg_collect_meminfo(struct cudbg_init *pdbg_init,
                          struct cudbg_buffer *dbg_buff,
                          struct cudbg_error *cudbg_err);
int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
                             struct cudbg_buffer *dbg_buff,
                             struct cudbg_error *cudbg_err);
@@ -163,7 +166,8 @@ void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
u32 cudbg_cim_obq_size(struct adapter *padap, int qid);
int cudbg_dump_context_size(struct adapter *padap);

struct cudbg_tcam;
int cudbg_fill_meminfo(struct adapter *padap,
                       struct cudbg_meminfo *meminfo_buff);
void cudbg_fill_le_tcam_info(struct adapter *padap,
                             struct cudbg_tcam *tcam_region);
#endif /* __CUDBG_LIB_H__ */
@@ -18,7 +18,6 @@
#include "t4_regs.h"
#include "cxgb4.h"
#include "cxgb4_cudbg.h"
#include "cudbg_entity.h"

static const struct cxgb4_collect_entity cxgb4_collect_mem_dump[] = {
        { CUDBG_EDC0, cudbg_collect_edc0_meminfo },
@@ -53,6 +52,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
        { CUDBG_SGE_INDIRECT, cudbg_collect_sge_indirect },
        { CUDBG_ULPRX_LA, cudbg_collect_ulprx_la },
        { CUDBG_TP_LA, cudbg_collect_tp_la },
        { CUDBG_MEMINFO, cudbg_collect_meminfo },
        { CUDBG_CIM_PIF_LA, cudbg_collect_cim_pif_la },
        { CUDBG_CLK, cudbg_collect_clk_info },
        { CUDBG_CIM_OBQ_RXQ0, cudbg_collect_obq_sge_rx_q0 },
@@ -201,6 +201,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
        case CUDBG_TP_LA:
                len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
                break;
        case CUDBG_MEMINFO:
                len = sizeof(struct cudbg_meminfo);
                break;
        case CUDBG_CIM_PIF_LA:
                len = sizeof(struct cudbg_cim_pif_la);
                len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
@@ -20,6 +20,7 @@

#include "cudbg_if.h"
#include "cudbg_lib_common.h"
#include "cudbg_entity.h"
#include "cudbg_lib.h"

typedef int (*cudbg_collect_callback_t)(struct cudbg_init *pdbg_init,
@@ -45,6 +45,10 @@
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"
#include "cudbg_if.h"
#include "cudbg_lib_common.h"
#include "cudbg_entity.h"
#include "cudbg_lib.h"

/* generic seq_file support for showing a table of size rows x width. */
static void *seq_tab_get_idx(struct seq_tab *tb, loff_t pos)
@@ -2794,18 +2798,6 @@ static const struct file_operations blocked_fl_fops = {
        .llseek = generic_file_llseek,
};

struct mem_desc {
        unsigned int base;
        unsigned int limit;
        unsigned int idx;
};

static int mem_desc_cmp(const void *a, const void *b)
{
        return ((const struct mem_desc *)a)->base -
               ((const struct mem_desc *)b)->base;
}

static void mem_region_show(struct seq_file *seq, const char *name,
                            unsigned int from, unsigned int to)
{
@@ -2819,250 +2811,60 @@ static void mem_region_show(struct seq_file *seq, const char *name,
static int meminfo_show(struct seq_file *seq, void *v)
{
        static const char * const memory[] = { "EDC0:", "EDC1:", "MC:",
                                               "MC0:", "MC1:"};
        static const char * const region[] = {
                "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
                "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
                "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
                "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
                "RQUDP region:", "PBL region:", "TXPBL region:",
                "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
                "On-chip queues:"
        };

        int i, n;
        u32 lo, hi, used, alloc;
        struct mem_desc avail[4];
        struct mem_desc mem[ARRAY_SIZE(region) + 3]; /* up to 3 holes */
        struct mem_desc *md = mem;
                                               "MC0:", "MC1:"};
        struct adapter *adap = seq->private;
        struct cudbg_meminfo meminfo;
        int i, rc;

        for (i = 0; i < ARRAY_SIZE(mem); i++) {
                mem[i].limit = 0;
                mem[i].idx = i;
        }
        memset(&meminfo, 0, sizeof(struct cudbg_meminfo));
        rc = cudbg_fill_meminfo(adap, &meminfo);
        if (rc)
                return -ENXIO;

        /* Find and sort the populated memory ranges */
        i = 0;
        lo = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
        if (lo & EDRAM0_ENABLE_F) {
                hi = t4_read_reg(adap, MA_EDRAM0_BAR_A);
                avail[i].base = EDRAM0_BASE_G(hi) << 20;
                avail[i].limit = avail[i].base + (EDRAM0_SIZE_G(hi) << 20);
                avail[i].idx = 0;
                i++;
        }
        if (lo & EDRAM1_ENABLE_F) {
                hi = t4_read_reg(adap, MA_EDRAM1_BAR_A);
                avail[i].base = EDRAM1_BASE_G(hi) << 20;
                avail[i].limit = avail[i].base + (EDRAM1_SIZE_G(hi) << 20);
                avail[i].idx = 1;
                i++;
        }

        if (is_t5(adap->params.chip)) {
                if (lo & EXT_MEM0_ENABLE_F) {
                        hi = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
                        avail[i].base = EXT_MEM0_BASE_G(hi) << 20;
                        avail[i].limit =
                                avail[i].base + (EXT_MEM0_SIZE_G(hi) << 20);
                        avail[i].idx = 3;
                        i++;
                }
                if (lo & EXT_MEM1_ENABLE_F) {
                        hi = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
                        avail[i].base = EXT_MEM1_BASE_G(hi) << 20;
                        avail[i].limit =
                                avail[i].base + (EXT_MEM1_SIZE_G(hi) << 20);
                        avail[i].idx = 4;
                        i++;
                }
        } else {
                if (lo & EXT_MEM_ENABLE_F) {
                        hi = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
                        avail[i].base = EXT_MEM_BASE_G(hi) << 20;
                        avail[i].limit =
                                avail[i].base + (EXT_MEM_SIZE_G(hi) << 20);
                        avail[i].idx = 2;
                        i++;
                }
        }
        if (!i) /* no memory available */
                return 0;
        sort(avail, i, sizeof(struct mem_desc), mem_desc_cmp, NULL);

        (md++)->base = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A);
        (md++)->base = t4_read_reg(adap, SGE_IMSG_CTXT_BADDR_A);
        (md++)->base = t4_read_reg(adap, SGE_FLM_CACHE_BADDR_A);
        (md++)->base = t4_read_reg(adap, TP_CMM_TCB_BASE_A);
        (md++)->base = t4_read_reg(adap, TP_CMM_MM_BASE_A);
        (md++)->base = t4_read_reg(adap, TP_CMM_TIMER_BASE_A);
        (md++)->base = t4_read_reg(adap, TP_CMM_MM_RX_FLST_BASE_A);
        (md++)->base = t4_read_reg(adap, TP_CMM_MM_TX_FLST_BASE_A);
        (md++)->base = t4_read_reg(adap, TP_CMM_MM_PS_FLST_BASE_A);

        /* the next few have explicit upper bounds */
        md->base = t4_read_reg(adap, TP_PMM_TX_BASE_A);
        md->limit = md->base - 1 +
                    t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A) *
                    PMTXMAXPAGE_G(t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A));
        md++;

        md->base = t4_read_reg(adap, TP_PMM_RX_BASE_A);
        md->limit = md->base - 1 +
                    t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) *
                    PMRXMAXPAGE_G(t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A));
        md++;

        if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
                if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) {
                        hi = t4_read_reg(adap, LE_DB_TID_HASHBASE_A) / 4;
                        md->base = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
                } else {
                        hi = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
                        md->base = t4_read_reg(adap,
                                               LE_DB_HASH_TBL_BASE_ADDR_A);
                }
                md->limit = 0;
        } else {
                md->base = 0;
                md->idx = ARRAY_SIZE(region); /* hide it */
        }
        md++;

#define ulp_region(reg) do { \
        md->base = t4_read_reg(adap, ULP_ ## reg ## _LLIMIT_A);\
        (md++)->limit = t4_read_reg(adap, ULP_ ## reg ## _ULIMIT_A); \
} while (0)

        ulp_region(RX_ISCSI);
        ulp_region(RX_TDDP);
        ulp_region(TX_TPT);
        ulp_region(RX_STAG);
        ulp_region(RX_RQ);
        ulp_region(RX_RQUDP);
        ulp_region(RX_PBL);
        ulp_region(TX_PBL);
#undef ulp_region
        md->base = 0;
        md->idx = ARRAY_SIZE(region);
        if (!is_t4(adap->params.chip)) {
                u32 size = 0;
                u32 sge_ctrl = t4_read_reg(adap, SGE_CONTROL2_A);
                u32 fifo_size = t4_read_reg(adap, SGE_DBVFIFO_SIZE_A);

                if (is_t5(adap->params.chip)) {
                        if (sge_ctrl & VFIFO_ENABLE_F)
                                size = DBVFIFO_SIZE_G(fifo_size);
                } else {
                        size = T6_DBVFIFO_SIZE_G(fifo_size);
                }

                if (size) {
                        md->base = BASEADDR_G(t4_read_reg(adap,
                                                          SGE_DBVFIFO_BADDR_A));
                        md->limit = md->base + (size << 2) - 1;
                }
        }

        md++;

        md->base = t4_read_reg(adap, ULP_RX_CTX_BASE_A);
        md->limit = 0;
        md++;
        md->base = t4_read_reg(adap, ULP_TX_ERR_TABLE_BASE_A);
        md->limit = 0;
        md++;

        md->base = adap->vres.ocq.start;
        if (adap->vres.ocq.size)
                md->limit = md->base + adap->vres.ocq.size - 1;
        else
                md->idx = ARRAY_SIZE(region); /* hide it */
        md++;

        /* add any address-space holes, there can be up to 3 */
        for (n = 0; n < i - 1; n++)
                if (avail[n].limit < avail[n + 1].base)
                        (md++)->base = avail[n].limit;
        if (avail[n].limit)
                (md++)->base = avail[n].limit;

        n = md - mem;
        sort(mem, n, sizeof(struct mem_desc), mem_desc_cmp, NULL);

        for (lo = 0; lo < i; lo++)
                mem_region_show(seq, memory[avail[lo].idx], avail[lo].base,
                                avail[lo].limit - 1);
        for (i = 0; i < meminfo.avail_c; i++)
                mem_region_show(seq, memory[meminfo.avail[i].idx],
                                meminfo.avail[i].base,
                                meminfo.avail[i].limit - 1);

        seq_putc(seq, '\n');
        for (i = 0; i < n; i++) {
                if (mem[i].idx >= ARRAY_SIZE(region))
        for (i = 0; i < meminfo.mem_c; i++) {
                if (meminfo.mem[i].idx >= ARRAY_SIZE(cudbg_region))
                        continue; /* skip holes */
                if (!mem[i].limit)
                        mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
                mem_region_show(seq, region[mem[i].idx], mem[i].base,
                                mem[i].limit);
                if (!meminfo.mem[i].limit)
                        meminfo.mem[i].limit =
                                i < meminfo.mem_c - 1 ?
                                meminfo.mem[i + 1].base - 1 : ~0;
                mem_region_show(seq, cudbg_region[meminfo.mem[i].idx],
                                meminfo.mem[i].base, meminfo.mem[i].limit);
        }

        seq_putc(seq, '\n');
        lo = t4_read_reg(adap, CIM_SDRAM_BASE_ADDR_A);
        hi = t4_read_reg(adap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
        mem_region_show(seq, "uP RAM:", lo, hi);
        mem_region_show(seq, "uP RAM:", meminfo.up_ram_lo, meminfo.up_ram_hi);
        mem_region_show(seq, "uP Extmem2:", meminfo.up_extmem2_lo,
                        meminfo.up_extmem2_hi);

        lo = t4_read_reg(adap, CIM_EXTMEM2_BASE_ADDR_A);
        hi = t4_read_reg(adap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
        mem_region_show(seq, "uP Extmem2:", lo, hi);

        lo = t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A);
        seq_printf(seq, "\n%u Rx pages of size %uKiB for %u channels\n",
                   PMRXMAXPAGE_G(lo),
                   t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) >> 10,
                   (lo & PMRXNUMCHN_F) ? 2 : 1);
                   meminfo.rx_pages_data[0], meminfo.rx_pages_data[1],
                   meminfo.rx_pages_data[2]);

        lo = t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A);
        hi = t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A);
        seq_printf(seq, "%u Tx pages of size %u%ciB for %u channels\n",
                   PMTXMAXPAGE_G(lo),
                   hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
                   hi >= (1 << 20) ? 'M' : 'K', 1 << PMTXNUMCHN_G(lo));
        seq_printf(seq, "%u p-structs\n\n",
                   t4_read_reg(adap, TP_CMM_MM_MAX_PSTRUCT_A));
                   meminfo.tx_pages_data[0], meminfo.tx_pages_data[1],
                   meminfo.tx_pages_data[2], meminfo.tx_pages_data[3]);

        for (i = 0; i < 4; i++) {
                if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
                        lo = t4_read_reg(adap, MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
                else
                        lo = t4_read_reg(adap, MPS_RX_PG_RSV0_A + i * 4);
                if (is_t5(adap->params.chip)) {
                        used = T5_USED_G(lo);
                        alloc = T5_ALLOC_G(lo);
                } else {
                        used = USED_G(lo);
                        alloc = ALLOC_G(lo);
                }
        seq_printf(seq, "%u p-structs\n\n", meminfo.p_structs);

        for (i = 0; i < 4; i++)
                /* For T6 these are MAC buffer groups */
                seq_printf(seq, "Port %d using %u pages out of %u allocated\n",
                           i, used, alloc);
        }
        for (i = 0; i < adap->params.arch.nchan; i++) {
                if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
                        lo = t4_read_reg(adap,
                                         MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
                else
                        lo = t4_read_reg(adap, MPS_RX_PG_RSV4_A + i * 4);
                if (is_t5(adap->params.chip)) {
                        used = T5_USED_G(lo);
                        alloc = T5_ALLOC_G(lo);
                } else {
                        used = USED_G(lo);
                        alloc = ALLOC_G(lo);
                }
                           i, meminfo.port_used[i], meminfo.port_alloc[i]);

        for (i = 0; i < adap->params.arch.nchan; i++)
                /* For T6 these are MAC buffer groups */
                seq_printf(seq,
                           "Loopback %d using %u pages out of %u allocated\n",
                           i, used, alloc);
        }
                           i, meminfo.loopback_used[i],
                           meminfo.loopback_alloc[i]);

        return 0;
}