// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "sqpoll.h"
#include "fdinfo.h"
#include "cancel.h"
#include "rsrc.h"

#ifdef CONFIG_PROC_FS
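/*
 * Dump one registered personality: the credentials (uids, gids,
 * supplementary groups and effective capabilities) stored for the
 * given personality id, used by the Personalities: section below.
 */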
static __cold int io_uring_show_cred(struct seq_file *m, unsigned int id,
		const struct cred *cred)
{
	struct user_namespace *uns = seq_user_ns(m);
	struct group_info *gi;
	kernel_cap_t cap;
	int g;

	seq_printf(m, "%5d\n", id);
	seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
	seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
	seq_puts(m, "\n\tGroups:\t");
	gi = cred->group_info;
	for (g = 0; g < gi->ngroups; g++) {
		seq_put_decimal_ull(m, g ? " " : "",
				    from_kgid_munged(uns, gi->gid[g]));
	}
	seq_puts(m, "\n\tCapEff:\t");
	cap = cred->cap_effective;
	seq_put_hex_ll(m, NULL, cap.val, 16);
	seq_putc(m, '\n');
	return 0;
}

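/*
 * NAPI busy-poll state is only tracked when CONFIG_NET_RX_BUSY_POLL is
 * enabled; otherwise napi_show_fdinfo() below is a no-op stub.
 */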
#ifdef CONFIG_NET_RX_BUSY_POLL
static __cold void common_tracking_show_fdinfo(struct io_ring_ctx *ctx,
					       struct seq_file *m,
					       const char *tracking_strategy)
{
	seq_puts(m, "NAPI:\tenabled\n");
	seq_printf(m, "napi tracking:\t%s\n", tracking_strategy);
	seq_printf(m, "napi_busy_poll_dt:\t%llu\n", ctx->napi_busy_poll_dt);
	if (ctx->napi_prefer_busy_poll)
		seq_puts(m, "napi_prefer_busy_poll:\ttrue\n");
	else
		seq_puts(m, "napi_prefer_busy_poll:\tfalse\n");
}

static __cold void napi_show_fdinfo(struct io_ring_ctx *ctx,
				    struct seq_file *m)
{
	unsigned int mode = READ_ONCE(ctx->napi_track_mode);

	switch (mode) {
	case IO_URING_NAPI_TRACKING_INACTIVE:
		seq_puts(m, "NAPI:\tdisabled\n");
		break;
	case IO_URING_NAPI_TRACKING_DYNAMIC:
		common_tracking_show_fdinfo(ctx, m, "dynamic");
		break;
	case IO_URING_NAPI_TRACKING_STATIC:
		common_tracking_show_fdinfo(ctx, m, "static");
		break;
	default:
		seq_printf(m, "NAPI:\tunknown mode (%u)\n", mode);
	}
}
#else
static inline void napi_show_fdinfo(struct io_ring_ctx *ctx,
				    struct seq_file *m)
{
}
#endif

/*
 * Caller holds a reference to the file already, we don't need to do
 * anything else to get an extra reference.
 */
__cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct io_ring_ctx *ctx = file->private_data;
	struct io_overflow_cqe *ocqe;
	struct io_rings *r = ctx->rings;
	struct rusage sq_usage;
	unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1;
	unsigned int sq_head = READ_ONCE(r->sq.head);
	unsigned int sq_tail = READ_ONCE(r->sq.tail);
	unsigned int cq_head = READ_ONCE(r->cq.head);
	unsigned int cq_tail = READ_ONCE(r->cq.tail);
	unsigned int cq_shift = 0;
	unsigned int sq_shift = 0;
	unsigned int sq_entries, cq_entries;
	int sq_pid = -1, sq_cpu = -1;
	u64 sq_total_time = 0, sq_work_time = 0;
	bool has_lock;
	unsigned int i;

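	/*
	 * With IORING_SETUP_CQE32/IORING_SETUP_SQE128 every ring entry is
	 * twice the normal size, so the ring indexes used below are shifted
	 * by one to land on the start of each double-sized entry.
	 */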
	if (ctx->flags & IORING_SETUP_CQE32)
		cq_shift = 1;
	if (ctx->flags & IORING_SETUP_SQE128)
		sq_shift = 1;

	/*
	 * we may get imprecise sqe and cqe info if uring is actively running
	 * since we get cached_sq_head and cached_cq_tail without uring_lock
	 * and sq_tail and cq_head are changed by userspace. But it's ok since
	 * we usually use these info when it is stuck.
	 */
	seq_printf(m, "SqMask:\t0x%x\n", sq_mask);
	seq_printf(m, "SqHead:\t%u\n", sq_head);
	seq_printf(m, "SqTail:\t%u\n", sq_tail);
	seq_printf(m, "CachedSqHead:\t%u\n", ctx->cached_sq_head);
	seq_printf(m, "CqMask:\t0x%x\n", cq_mask);
	seq_printf(m, "CqHead:\t%u\n", cq_head);
	seq_printf(m, "CqTail:\t%u\n", cq_tail);
	seq_printf(m, "CachedCqTail:\t%u\n", ctx->cached_cq_tail);
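	/*
	 * Dump the pending SQEs. Each line shows the basic SQE fields; when
	 * SQE128 is in use, the second half of the entry is appended as raw
	 * e0..e7 words.
	 */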
	seq_printf(m, "SQEs:\t%u\n", sq_tail - sq_head);
	sq_entries = min(sq_tail - sq_head, ctx->sq_entries);
	for (i = 0; i < sq_entries; i++) {
		unsigned int entry = i + sq_head;
		struct io_uring_sqe *sqe;
		unsigned int sq_idx;

		if (ctx->flags & IORING_SETUP_NO_SQARRAY)
			break;
		sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
		if (sq_idx > sq_mask)
			continue;
		sqe = &ctx->sq_sqes[sq_idx << sq_shift];
		seq_printf(m, "%5u: opcode:%s, fd:%d, flags:%x, off:%llu, "
			      "addr:0x%llx, rw_flags:0x%x, buf_index:%d "
			      "user_data:%llu",
			   sq_idx, io_uring_get_opcode(sqe->opcode), sqe->fd,
			   sqe->flags, (unsigned long long) sqe->off,
			   (unsigned long long) sqe->addr, sqe->rw_flags,
			   sqe->buf_index, sqe->user_data);
		if (sq_shift) {
			u64 *sqeb = (void *) (sqe + 1);
			int size = sizeof(struct io_uring_sqe) / sizeof(u64);
			int j;

			for (j = 0; j < size; j++) {
				seq_printf(m, ", e%d:0x%llx", j,
					   (unsigned long long) *sqeb);
				sqeb++;
			}
		}
		seq_printf(m, "\n");
	}
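	/*
	 * Dump the CQEs that userspace has not consumed yet; for CQE32 rings
	 * the two extra big_cqe words are printed as extra1/extra2.
	 */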
seq_printf(m, "CQEs:\t%u\n", cq_tail - cq_head);
|
|
|
|
cq_entries = min(cq_tail - cq_head, ctx->cq_entries);
|
|
|
|
for (i = 0; i < cq_entries; i++) {
|
|
|
|
unsigned int entry = i + cq_head;
|
|
|
|
struct io_uring_cqe *cqe = &r->cqes[(entry & cq_mask) << cq_shift];
|
|
|
|
|
2022-09-11 12:36:09 +00:00
|
|
|
seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x",
|
2022-05-25 16:40:19 +00:00
|
|
|
entry & cq_mask, cqe->user_data, cqe->res,
|
|
|
|
cqe->flags);
|
2022-09-11 12:36:09 +00:00
|
|
|
if (cq_shift)
|
|
|
|
seq_printf(m, ", extra1:%llu, extra2:%llu\n",
|
|
|
|
cqe->big_cqe[0], cqe->big_cqe[1]);
|
|
|
|
seq_printf(m, "\n");
|
2022-05-25 16:40:19 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
	/*
	 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
	 * since fdinfo case grabs it in the opposite direction of normal use
	 * cases. If we fail to get the lock, we just don't iterate any
	 * structures that could be going away outside the io_uring mutex.
	 */
	has_lock = mutex_trylock(&ctx->uring_lock);

	if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
		struct io_sq_data *sq = ctx->sq_data;

		/*
		 * sq->thread might be NULL if we raced with the sqpoll
		 * thread termination.
		 */
		if (sq->thread) {
			sq_pid = sq->task_pid;
			sq_cpu = sq->sq_cpu;
			getrusage(sq->thread, RUSAGE_SELF, &sq_usage);
			sq_total_time = (sq_usage.ru_stime.tv_sec * 1000000
					 + sq_usage.ru_stime.tv_usec);
			sq_work_time = sq->work_time;
		}
	}

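	/*
	 * SqTotalTime is derived from getrusage() above: the SQPOLL thread's
	 * system CPU time in microseconds. SqWorkTime is reported straight
	 * from sq->work_time.
	 */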
	seq_printf(m, "SqThread:\t%d\n", sq_pid);
	seq_printf(m, "SqThreadCpu:\t%d\n", sq_cpu);
	seq_printf(m, "SqTotalTime:\t%llu\n", sq_total_time);
	seq_printf(m, "SqWorkTime:\t%llu\n", sq_work_time);
	seq_printf(m, "UserFiles:\t%u\n", ctx->file_table.data.nr);
	for (i = 0; has_lock && i < ctx->file_table.data.nr; i++) {
		struct file *f = NULL;

		if (ctx->file_table.data.nodes[i])
			f = io_slot_file(ctx->file_table.data.nodes[i]);
		if (f)
			seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
		else
			seq_printf(m, "%5u: <none>\n", i);
	}
	seq_printf(m, "UserBufs:\t%u\n", ctx->buf_table.nr);
	for (i = 0; has_lock && i < ctx->buf_table.nr; i++) {
		struct io_mapped_ubuf *buf = NULL;

		if (ctx->buf_table.nodes[i])
			buf = ctx->buf_table.nodes[i]->buf;
		if (buf)
			seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, buf->len);
		else
			seq_printf(m, "%5u: <none>\n", i);
	}
	if (has_lock && !xa_empty(&ctx->personalities)) {
		unsigned long index;
		const struct cred *cred;

		seq_printf(m, "Personalities:\n");
		xa_for_each(&ctx->personalities, index, cred)
			io_uring_show_cred(m, index, cred);
	}

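	/*
	 * PollList walks the cancelation hash buckets; task_works reports
	 * whether the originating io_uring task context (req->tctx) still
	 * has task_work pending.
	 */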
seq_puts(m, "PollList:\n");
|
2024-09-30 20:22:36 +00:00
|
|
|
for (i = 0; has_lock && i < (1U << ctx->cancel_table.hash_bits); i++) {
|
2022-06-16 09:22:10 +00:00
|
|
|
struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];
|
2022-05-25 16:40:19 +00:00
|
|
|
struct io_kiocb *req;
|
|
|
|
|
2022-06-16 09:22:02 +00:00
|
|
|
hlist_for_each_entry(req, &hb->list, hash_node)
|
2022-05-25 16:40:19 +00:00
|
|
|
seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
|
				   task_work_pending(req->tctx->task));
	}

	if (has_lock)
		mutex_unlock(&ctx->uring_lock);

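	/*
	 * Overflowed CQEs that could not be posted to the CQ ring are kept
	 * on ctx->cq_overflow_list; walk it under the completion lock.
	 */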
	seq_puts(m, "CqOverflowList:\n");
	spin_lock(&ctx->completion_lock);
	list_for_each_entry(ocqe, &ctx->cq_overflow_list, list) {
		struct io_uring_cqe *cqe = &ocqe->cqe;

		seq_printf(m, " user_data=%llu, res=%d, flags=%x\n",
			   cqe->user_data, cqe->res, cqe->flags);
	}
	spin_unlock(&ctx->completion_lock);
	napi_show_fdinfo(ctx, m);
}
#endif