Alexei Starovoitov says:

====================
pull-request: bpf-next 2021-03-24

The following pull-request contains BPF updates for your *net-next* tree.

We've added 37 non-merge commits during the last 15 day(s) which contain
a total of 65 files changed, 3200 insertions(+), 738 deletions(-).

The main changes are:

1) Static linking of multiple BPF ELF files, from Andrii.

2) Move drop error path to devmap for XDP_REDIRECT, from Lorenzo.

3) Spelling fixes from various folks.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 241949e488
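Nearly all of the driver churn below implements one convention change for item 2): each ndo_xdp_xmit() implementation stops freeing frames that failed to transmit (and counting drops), and instead stops at the first failure and returns the number of successfully sent frames; freeing the remainder becomes the devmap caller's job. The following is a minimal before/after sketch, not taken from any one driver: fake_ring_tx() is a hypothetical enqueue helper, while xdp_return_frame_rx_napi() is the real API.

  /* Old convention: the driver frees failed frames itself and
   * returns n - drops.
   */
  static int old_xdp_xmit(struct net_device *dev, int n,
  			  struct xdp_frame **frames, u32 flags)
  {
  	int i, drops = 0;

  	for (i = 0; i < n; i++) {
  		if (fake_ring_tx(dev, frames[i])) {	/* hypothetical */
  			xdp_return_frame_rx_napi(frames[i]);
  			drops++;
  		}
  	}
  	return n - drops;
  }

  /* New convention: stop at the first failure and return nxmit;
   * bq_xmit_all() in the devmap code frees frames[nxmit..n-1].
   */
  static int new_xdp_xmit(struct net_device *dev, int n,
  			  struct xdp_frame **frames, u32 flags)
  {
  	int i, nxmit = 0;

  	for (i = 0; i < n; i++) {
  		if (fake_ring_tx(dev, frames[i]))	/* hypothetical */
  			break;
  		nxmit++;
  	}
  	return nxmit;
  }

The devmap hunk further down (bq_xmit_all()) is the single place that now walks the unsent tail and frees it, which is why the per-driver drops accounting could be deleted everywhere.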
@@ -1209,21 +1209,67 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
*/
case BPF_STX | BPF_ATOMIC | BPF_DW:
case BPF_STX | BPF_ATOMIC | BPF_W:
if (insn->imm != BPF_ADD) {
{
bool is32 = BPF_SIZE(insn->code) == BPF_W;

switch (insn->imm) {
/* {op32|op64} {%w0|%src},%src,off(%dst) */
#define EMIT_ATOMIC(op32, op64) do { \
EMIT6_DISP_LH(0xeb000000, is32 ? (op32) : (op64), \
(insn->imm & BPF_FETCH) ? src_reg : REG_W0, \
src_reg, dst_reg, off); \
if (is32 && (insn->imm & BPF_FETCH)) \
EMIT_ZERO(src_reg); \
} while (0)
case BPF_ADD:
case BPF_ADD | BPF_FETCH:
/* {laal|laalg} */
EMIT_ATOMIC(0x00fa, 0x00ea);
break;
case BPF_AND:
case BPF_AND | BPF_FETCH:
/* {lan|lang} */
EMIT_ATOMIC(0x00f4, 0x00e4);
break;
case BPF_OR:
case BPF_OR | BPF_FETCH:
/* {lao|laog} */
EMIT_ATOMIC(0x00f6, 0x00e6);
break;
case BPF_XOR:
case BPF_XOR | BPF_FETCH:
/* {lax|laxg} */
EMIT_ATOMIC(0x00f7, 0x00e7);
break;
#undef EMIT_ATOMIC
case BPF_XCHG:
/* {ly|lg} %w0,off(%dst) */
EMIT6_DISP_LH(0xe3000000,
is32 ? 0x0058 : 0x0004, REG_W0, REG_0,
dst_reg, off);
/* 0: {csy|csg} %w0,%src,off(%dst) */
EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030,
REG_W0, src_reg, dst_reg, off);
/* brc 4,0b */
EMIT4_PCREL_RIC(0xa7040000, 4, jit->prg - 6);
/* {llgfr|lgr} %src,%w0 */
EMIT4(is32 ? 0xb9160000 : 0xb9040000, src_reg, REG_W0);
if (is32 && insn_is_zext(&insn[1]))
insn_count = 2;
break;
case BPF_CMPXCHG:
/* 0: {csy|csg} %b0,%src,off(%dst) */
EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030,
BPF_REG_0, src_reg, dst_reg, off);
break;
default:
pr_err("Unknown atomic operation %02x\n", insn->imm);
return -1;
}

/* *(u32/u64 *)(dst + off) += src
*
* BFW_W: laal %w0,%src,off(%dst)
* BPF_DW: laalg %w0,%src,off(%dst)
*/
EMIT6_DISP_LH(0xeb000000,
BPF_SIZE(insn->code) == BPF_W ? 0x00fa : 0x00ea,
REG_W0, src_reg, dst_reg, off);
jit->seen |= SEEN_MEM;
break;
}
/*
* BPF_LDX
*/
@@ -300,7 +300,7 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,

rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &push_hdr, &push_len);
if (unlikely(rc))
goto error_drop_packet;
return rc;

ena_tx_ctx.ena_bufs = tx_info->bufs;
ena_tx_ctx.push_header = push_hdr;
@@ -330,8 +330,6 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
error_unmap_dma:
ena_unmap_tx_buff(xdp_ring, tx_info);
tx_info->xdpf = NULL;
error_drop_packet:
xdp_return_frame(xdpf);
return rc;
}

@@ -339,8 +337,8 @@ static int ena_xdp_xmit(struct net_device *dev, int n,
struct xdp_frame **frames, u32 flags)
{
struct ena_adapter *adapter = netdev_priv(dev);
int qid, i, err, drops = 0;
struct ena_ring *xdp_ring;
int qid, i, nxmit = 0;

if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
@@ -360,12 +358,9 @@ static int ena_xdp_xmit(struct net_device *dev, int n,
spin_lock(&xdp_ring->xdp_tx_lock);

for (i = 0; i < n; i++) {
err = ena_xdp_xmit_frame(xdp_ring, dev, frames[i], 0);
/* The descriptor is freed by ena_xdp_xmit_frame in case
* of an error.
*/
if (err)
drops++;
if (ena_xdp_xmit_frame(xdp_ring, dev, frames[i], 0))
break;
nxmit++;
}

/* Ring doorbell to make device aware of the packets */
@@ -378,7 +373,7 @@ static int ena_xdp_xmit(struct net_device *dev, int n,
spin_unlock(&xdp_ring->xdp_tx_lock);

/* Return number of packets sent */
return n - drops;
return nxmit;
}

static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
@@ -415,7 +410,9 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
/* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
spin_lock(&xdp_ring->xdp_tx_lock);

ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf, XDP_XMIT_FLUSH);
if (ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf,
XDP_XMIT_FLUSH))
xdp_return_frame(xdpf);

spin_unlock(&xdp_ring->xdp_tx_lock);
xdp_stat = &rx_ring->rx_stats.xdp_tx;
@@ -217,7 +217,7 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
struct pci_dev *pdev = bp->pdev;
struct bnxt_tx_ring_info *txr;
dma_addr_t mapping;
int drops = 0;
int nxmit = 0;
int ring;
int i;

@@ -233,21 +233,17 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
struct xdp_frame *xdp = frames[i];

if (!txr || !bnxt_tx_avail(bp, txr) ||
!(bp->bnapi[ring]->flags & BNXT_NAPI_FLAG_XDP)) {
xdp_return_frame_rx_napi(xdp);
drops++;
continue;
}
!(bp->bnapi[ring]->flags & BNXT_NAPI_FLAG_XDP))
break;

mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
DMA_TO_DEVICE);

if (dma_mapping_error(&pdev->dev, mapping)) {
xdp_return_frame_rx_napi(xdp);
drops++;
continue;
}
if (dma_mapping_error(&pdev->dev, mapping))
break;

__bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp);
nxmit++;
}

if (flags & XDP_XMIT_FLUSH) {
@@ -256,7 +252,7 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
}

return num_frames - drops;
return nxmit;
}

/* Under rtnl_lock */
@@ -3081,7 +3081,7 @@ static int dpaa_xdp_xmit(struct net_device *net_dev, int n,
struct xdp_frame **frames, u32 flags)
{
struct xdp_frame *xdpf;
int i, err, drops = 0;
int i, nxmit = 0;

if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
@@ -3091,14 +3091,12 @@ static int dpaa_xdp_xmit(struct net_device *net_dev, int n,

for (i = 0; i < n; i++) {
xdpf = frames[i];
err = dpaa_xdp_xmit_frame(net_dev, xdpf);
if (err) {
xdp_return_frame_rx_napi(xdpf);
drops++;
}
if (dpaa_xdp_xmit_frame(net_dev, xdpf))
break;
nxmit++;
}

return n - drops;
return nxmit;
}

static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -2431,8 +2431,6 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
percpu_stats->tx_packets += enqueued;
for (i = 0; i < enqueued; i++)
percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
for (i = enqueued; i < n; i++)
xdp_return_frame_rx_napi(frames[i]);

return enqueued;
}
@@ -3835,8 +3835,8 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
* @frames: array of XDP buffer pointers
* @flags: XDP extra info
*
* Returns number of frames successfully sent. Frames that fail are
* free'ed via XDP return API.
* Returns number of frames successfully sent. Failed frames
* will be free'ed by XDP core.
*
* For error cases, a negative errno code is returned and no-frames
* are transmitted (caller must handle freeing frames).
@@ -3849,7 +3849,7 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_ring *xdp_ring;
int drops = 0;
int nxmit = 0;
int i;

if (test_bit(__I40E_VSI_DOWN, vsi->state))
@@ -3869,14 +3869,13 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
int err;

err = i40e_xmit_xdp_ring(xdpf, xdp_ring);
if (err != I40E_XDP_TX) {
xdp_return_frame_rx_napi(xdpf);
drops++;
}
if (err != I40E_XDP_TX)
break;
nxmit++;
}

if (unlikely(flags & XDP_XMIT_FLUSH))
i40e_xdp_ring_update_tail(xdp_ring);

return n - drops;
return nxmit;
}
@@ -554,8 +554,8 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
* @frames: XDP frames to be transmitted
* @flags: transmit flags
*
* Returns number of frames successfully sent. Frames that fail are
* free'ed via XDP return API.
* Returns number of frames successfully sent. Failed frames
* will be free'ed by XDP core.
* For error cases, a negative errno code is returned and no-frames
* are transmitted (caller must handle freeing frames).
*/
@@ -567,7 +567,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
unsigned int queue_index = smp_processor_id();
struct ice_vsi *vsi = np->vsi;
struct ice_ring *xdp_ring;
int drops = 0, i;
int nxmit = 0, i;

if (test_bit(__ICE_DOWN, vsi->state))
return -ENETDOWN;
@@ -584,16 +584,15 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
int err;

err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
if (err != ICE_XDP_TX) {
xdp_return_frame_rx_napi(xdpf);
drops++;
}
if (err != ICE_XDP_TX)
break;
nxmit++;
}

if (unlikely(flags & XDP_XMIT_FLUSH))
ice_xdp_ring_update_tail(xdp_ring);

return n - drops;
return nxmit;
}

/**
@@ -2934,7 +2934,7 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
int cpu = smp_processor_id();
struct igb_ring *tx_ring;
struct netdev_queue *nq;
int drops = 0;
int nxmit = 0;
int i;

if (unlikely(test_bit(__IGB_DOWN, &adapter->state)))
@@ -2961,10 +2961,9 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
int err;

err = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
if (err != IGB_XDP_TX) {
xdp_return_frame_rx_napi(xdpf);
drops++;
}
if (err != IGB_XDP_TX)
break;
nxmit++;
}

__netif_tx_unlock(nq);
@@ -2972,7 +2971,7 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
if (unlikely(flags & XDP_XMIT_FLUSH))
igb_xdp_ring_update_tail(tx_ring);

return n - drops;
return nxmit;
}

static const struct net_device_ops igb_netdev_ops = {
@@ -10189,7 +10189,7 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_ring *ring;
int drops = 0;
int nxmit = 0;
int i;

if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
@@ -10213,16 +10213,15 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
int err;

err = ixgbe_xmit_xdp_ring(adapter, xdpf);
if (err != IXGBE_XDP_TX) {
xdp_return_frame_rx_napi(xdpf);
drops++;
}
if (err != IXGBE_XDP_TX)
break;
nxmit++;
}

if (unlikely(flags & XDP_XMIT_FLUSH))
ixgbe_xdp_ring_update_tail(ring);

return n - drops;
return nxmit;
}

static const struct net_device_ops ixgbe_netdev_ops = {
@@ -2137,7 +2137,7 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame,
{
struct mvneta_port *pp = netdev_priv(dev);
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
int i, nxmit_byte = 0, nxmit = num_frame;
int i, nxmit_byte = 0, nxmit = 0;
int cpu = smp_processor_id();
struct mvneta_tx_queue *txq;
struct netdev_queue *nq;
@@ -2155,12 +2155,11 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame,
__netif_tx_lock(nq, cpu);
for (i = 0; i < num_frame; i++) {
ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true);
if (ret == MVNETA_XDP_TX) {
nxmit_byte += frames[i]->len;
} else {
xdp_return_frame_rx_napi(frames[i]);
nxmit--;
}
if (ret != MVNETA_XDP_TX)
break;

nxmit_byte += frames[i]->len;
nxmit++;
}

if (unlikely(flags & XDP_XMIT_FLUSH))
@@ -3744,7 +3744,7 @@ mvpp2_xdp_xmit(struct net_device *dev, int num_frame,
struct xdp_frame **frames, u32 flags)
{
struct mvpp2_port *port = netdev_priv(dev);
int i, nxmit_byte = 0, nxmit = num_frame;
int i, nxmit_byte = 0, nxmit = 0;
struct mvpp2_pcpu_stats *stats;
u16 txq_id;
u32 ret;
@@ -3762,12 +3762,11 @@ mvpp2_xdp_xmit(struct net_device *dev, int num_frame,

for (i = 0; i < num_frame; i++) {
ret = mvpp2_xdp_submit_frame(port, txq_id, frames[i], true);
if (ret == MVPP2_XDP_TX) {
nxmit_byte += frames[i]->len;
} else {
xdp_return_frame_rx_napi(frames[i]);
nxmit--;
}
if (ret != MVPP2_XDP_TX)
break;

nxmit_byte += frames[i]->len;
nxmit++;
}

if (likely(nxmit > 0))
@@ -500,7 +500,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_xdpsq *sq;
int drops = 0;
int nxmit = 0;
int sq_num;
int i;

@@ -529,11 +529,8 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
xdptxd.dma_addr = dma_map_single(sq->pdev, xdptxd.data,
xdptxd.len, DMA_TO_DEVICE);

if (unlikely(dma_mapping_error(sq->pdev, xdptxd.dma_addr))) {
xdp_return_frame_rx_napi(xdpf);
drops++;
continue;
}
if (unlikely(dma_mapping_error(sq->pdev, xdptxd.dma_addr)))
break;

xdpi.mode = MLX5E_XDP_XMIT_MODE_FRAME;
xdpi.frame.xdpf = xdpf;
@@ -544,9 +541,9 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
if (unlikely(!ret)) {
dma_unmap_single(sq->pdev, xdptxd.dma_addr,
xdptxd.len, DMA_TO_DEVICE);
xdp_return_frame_rx_napi(xdpf);
drops++;
break;
}
nxmit++;
}

if (flags & XDP_XMIT_FLUSH) {
@@ -555,7 +552,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
mlx5e_xmit_xdp_doorbell(sq);
}

return n - drops;
return nxmit;
}

void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq)
@@ -345,7 +345,7 @@ int qede_xdp_transmit(struct net_device *dev, int n_frames,
struct qede_tx_queue *xdp_tx;
struct xdp_frame *xdpf;
dma_addr_t mapping;
int i, drops = 0;
int i, nxmit = 0;
u16 xdp_prod;

if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
@@ -364,18 +364,13 @@ int qede_xdp_transmit(struct net_device *dev, int n_frames,

mapping = dma_map_single(dmadev, xdpf->data, xdpf->len,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dmadev, mapping))) {
xdp_return_frame_rx_napi(xdpf);
drops++;

continue;
}
if (unlikely(dma_mapping_error(dmadev, mapping)))
break;

if (unlikely(qede_xdp_xmit(xdp_tx, mapping, 0, xdpf->len,
NULL, xdpf))) {
xdp_return_frame_rx_napi(xdpf);
drops++;
}
NULL, xdpf)))
break;
nxmit++;
}

if (flags & XDP_XMIT_FLUSH) {
@@ -387,7 +382,7 @@ int qede_xdp_transmit(struct net_device *dev, int n_frames,

spin_unlock(&xdp_tx->xdp_tx_lock);

return n_frames - drops;
return nxmit;
}

int qede_txq_has_work(struct qede_tx_queue *txq)
@@ -412,14 +412,6 @@ err:
return NETDEV_TX_OK;
}

static void efx_xdp_return_frames(int n, struct xdp_frame **xdpfs)
{
int i;

for (i = 0; i < n; i++)
xdp_return_frame_rx_napi(xdpfs[i]);
}

/* Transmit a packet from an XDP buffer
*
* Returns number of packets sent on success, error code otherwise.
@@ -492,12 +484,7 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
if (flush && i > 0)
efx_nic_push_buffers(tx_queue);

if (i == 0)
return -EIO;

efx_xdp_return_frames(n - i, xdpfs + i);

return i;
return i == 0 ? -EIO : i;
}

/* Initiate a packet transmission. We use one channel per CPU
@@ -1760,8 +1760,7 @@ static int netsec_xdp_xmit(struct net_device *ndev, int n,
{
struct netsec_priv *priv = netdev_priv(ndev);
struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
int drops = 0;
int i;
int i, nxmit = 0;

if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
@@ -1772,12 +1771,11 @@ static int netsec_xdp_xmit(struct net_device *ndev, int n,
int err;

err = netsec_xdp_queue_one(priv, xdpf, true);
if (err != NETSEC_XDP_TX) {
xdp_return_frame_rx_napi(xdpf);
drops++;
} else {
tx_ring->xdp_xmit++;
}
if (err != NETSEC_XDP_TX)
break;

tx_ring->xdp_xmit++;
nxmit++;
}
spin_unlock(&tx_ring->lock);

@@ -1786,7 +1784,7 @@ static int netsec_xdp_xmit(struct net_device *ndev, int n,
tx_ring->xdp_xmit = 0;
}

return n - drops;
return nxmit;
}

static int netsec_xdp_setup(struct netsec_priv *priv, struct bpf_prog *prog,
@@ -1123,25 +1123,23 @@ static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
struct xdp_frame *xdpf;
int i, drops = 0, port;
int i, nxmit = 0, port;

if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;

for (i = 0; i < n; i++) {
xdpf = frames[i];
if (xdpf->len < CPSW_MIN_PACKET_SIZE) {
xdp_return_frame_rx_napi(xdpf);
drops++;
continue;
}
if (xdpf->len < CPSW_MIN_PACKET_SIZE)
break;

port = priv->emac_port + cpsw->data.dual_emac;
if (cpsw_xdp_tx_frame(priv, xdpf, NULL, port))
drops++;
break;
nxmit++;
}

return n - drops;
return nxmit;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1093,24 +1093,22 @@ static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct xdp_frame *xdpf;
int i, drops = 0;
int i, nxmit = 0;

if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;

for (i = 0; i < n; i++) {
xdpf = frames[i];
if (xdpf->len < CPSW_MIN_PACKET_SIZE) {
xdp_return_frame_rx_napi(xdpf);
drops++;
continue;
}
if (xdpf->len < CPSW_MIN_PACKET_SIZE)
break;

if (cpsw_xdp_tx_frame(priv, xdpf, NULL, priv->emac_port))
drops++;
break;
nxmit++;
}

return n - drops;
return nxmit;
}

static int cpsw_get_port_parent_id(struct net_device *ndev,
@@ -1305,19 +1305,15 @@ int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
ret = cpdma_chan_submit_mapped(txch, cpsw_xdpf_to_handle(xdpf),
dma, xdpf->len, port);
} else {
if (sizeof(*xmeta) > xdpf->headroom) {
xdp_return_frame_rx_napi(xdpf);
if (sizeof(*xmeta) > xdpf->headroom)
return -EINVAL;
}

ret = cpdma_chan_submit(txch, cpsw_xdpf_to_handle(xdpf),
xdpf->data, xdpf->len, port);
}

if (ret) {
if (ret)
priv->ndev->stats.tx_dropped++;
xdp_return_frame_rx_napi(xdpf);
}

return ret;
}
@@ -1353,7 +1349,8 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
if (unlikely(!xdpf))
goto drop;

cpsw_xdp_tx_frame(priv, xdpf, page, port);
if (cpsw_xdp_tx_frame(priv, xdpf, page, port))
xdp_return_frame_rx_napi(xdpf);
break;
case XDP_REDIRECT:
if (xdp_do_redirect(ndev, xdp, prog))
@@ -1181,8 +1181,7 @@ static int tun_xdp_xmit(struct net_device *dev, int n,
struct tun_struct *tun = netdev_priv(dev);
struct tun_file *tfile;
u32 numqueues;
int drops = 0;
int cnt = n;
int nxmit = 0;
int i;

if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
@@ -1212,9 +1211,9 @@ resample:

if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
atomic_long_inc(&dev->tx_dropped);
xdp_return_frame_rx_napi(xdp);
drops++;
break;
}
nxmit++;
}
spin_unlock(&tfile->tx_ring.producer_lock);

@@ -1222,17 +1221,21 @@ resample:
__tun_xdp_flush_tfile(tfile);

rcu_read_unlock();
return cnt - drops;
return nxmit;
}

static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
{
struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);
int nxmit;

if (unlikely(!frame))
return -EOVERFLOW;

return tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
nxmit = tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
if (!nxmit)
xdp_return_frame_rx_napi(frame);
return nxmit;
}

static const struct net_device_ops tap_netdev_ops = {
@@ -433,7 +433,7 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
u32 flags, bool ndo_xmit)
{
struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
int i, ret = -ENXIO, drops = 0;
int i, ret = -ENXIO, nxmit = 0;
struct net_device *rcv;
unsigned int max_len;
struct veth_rq *rq;
@@ -463,21 +463,20 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
void *ptr = veth_xdp_to_ptr(frame);

if (unlikely(frame->len > max_len ||
__ptr_ring_produce(&rq->xdp_ring, ptr))) {
xdp_return_frame_rx_napi(frame);
drops++;
}
__ptr_ring_produce(&rq->xdp_ring, ptr)))
break;
nxmit++;
}
spin_unlock(&rq->xdp_ring.producer_lock);

if (flags & XDP_XMIT_FLUSH)
__veth_xdp_flush(rq);

ret = n - drops;
ret = nxmit;
if (ndo_xmit) {
u64_stats_update_begin(&rq->stats.syncp);
rq->stats.vs.peer_tq_xdp_xmit += n - drops;
rq->stats.vs.peer_tq_xdp_xmit_err += drops;
rq->stats.vs.peer_tq_xdp_xmit += nxmit;
rq->stats.vs.peer_tq_xdp_xmit_err += n - nxmit;
u64_stats_update_end(&rq->stats.syncp);
}

@@ -504,20 +503,23 @@ static int veth_ndo_xdp_xmit(struct net_device *dev, int n,

static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
int sent, i, err = 0;
int sent, i, err = 0, drops;

sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false);
if (sent < 0) {
err = sent;
sent = 0;
for (i = 0; i < bq->count; i++)
xdp_return_frame(bq->q[i]);
}
trace_xdp_bulk_tx(rq->dev, sent, bq->count - sent, err);

for (i = sent; unlikely(i < bq->count); i++)
xdp_return_frame(bq->q[i]);

drops = bq->count - sent;
trace_xdp_bulk_tx(rq->dev, sent, drops, err);

u64_stats_update_begin(&rq->stats.syncp);
rq->stats.vs.xdp_tx += sent;
rq->stats.vs.xdp_tx_err += bq->count - sent;
rq->stats.vs.xdp_tx_err += drops;
u64_stats_update_end(&rq->stats.syncp);

bq->count = 0;
@@ -531,10 +531,10 @@ static int virtnet_xdp_xmit(struct net_device *dev,
unsigned int len;
int packets = 0;
int bytes = 0;
int drops = 0;
int nxmit = 0;
int kicks = 0;
int ret, err;
void *ptr;
int ret;
int i;

/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
@@ -548,7 +548,6 @@ static int virtnet_xdp_xmit(struct net_device *dev,

if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
ret = -EINVAL;
drops = n;
goto out;
}

@@ -571,13 +570,11 @@ static int virtnet_xdp_xmit(struct net_device *dev,
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];

err = __virtnet_xdp_xmit_one(vi, sq, xdpf);
if (err) {
xdp_return_frame_rx_napi(xdpf);
drops++;
}
if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
break;
nxmit++;
}
ret = n - drops;
ret = nxmit;

if (flags & XDP_XMIT_FLUSH) {
if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
@@ -588,7 +585,7 @@ out:
sq->stats.bytes += bytes;
sq->stats.packets += packets;
sq->stats.xdp_tx += n;
sq->stats.xdp_tx_drops += drops;
sq->stats.xdp_tx_drops += n - nxmit;
sq->stats.kicks += kicks;
u64_stats_update_end(&sq->stats.syncp);

@@ -742,7 +739,9 @@ static struct sk_buff *receive_small(struct net_device *dev,
if (unlikely(!xdpf))
goto err_xdp;
err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
if (unlikely(err < 0)) {
if (unlikely(!err)) {
xdp_return_frame_rx_napi(xdpf);
} else if (unlikely(err < 0)) {
trace_xdp_exception(vi->dev, xdp_prog, act);
goto err_xdp;
}
@@ -929,7 +928,9 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
if (unlikely(!xdpf))
goto err_xdp;
err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
if (unlikely(err < 0)) {
if (unlikely(!err)) {
xdp_return_frame_rx_napi(xdpf);
} else if (unlikely(err < 0)) {
trace_xdp_exception(vi->dev, xdp_prog, act);
if (unlikely(xdp_page != page))
put_page(xdp_page);
@@ -608,8 +608,8 @@ static int xennet_xdp_xmit(struct net_device *dev, int n,
struct netfront_info *np = netdev_priv(dev);
struct netfront_queue *queue = NULL;
unsigned long irq_flags;
int drops = 0;
int i, err;
int nxmit = 0;
int i;

if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
@@ -622,15 +622,13 @@ static int xennet_xdp_xmit(struct net_device *dev, int n,

if (!xdpf)
continue;
err = xennet_xdp_xmit_one(dev, queue, xdpf);
if (err) {
xdp_return_frame_rx_napi(xdpf);
drops++;
}
if (xennet_xdp_xmit_one(dev, queue, xdpf))
break;
nxmit++;
}
spin_unlock_irqrestore(&queue->tx_lock, irq_flags);

return n - drops;
return nxmit;
}

@@ -875,7 +873,9 @@ static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
get_page(pdata);
xdpf = xdp_convert_buff_to_frame(xdp);
err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
if (unlikely(err < 0))
if (unlikely(!err))
xdp_return_frame_rx_napi(xdpf);
else if (unlikely(err < 0))
trace_xdp_exception(queue->info->netdev, prog, act);
break;
case XDP_REDIRECT:
@@ -9,6 +9,7 @@
#include <uapi/linux/bpf.h>

#define BTF_TYPE_EMIT(type) ((void)(type *)0)
#define BTF_TYPE_EMIT_ENUM(enum_val) ((void)enum_val)

struct btf;
struct btf_member;
@@ -67,7 +67,7 @@ BPF_CALL_2(bpf_bprm_opts_set, struct linux_binprm *, bprm, u64, flags)

BTF_ID_LIST_SINGLE(bpf_bprm_opts_set_btf_ids, struct, linux_binprm)

const static struct bpf_func_proto bpf_bprm_opts_set_proto = {
static const struct bpf_func_proto bpf_bprm_opts_set_proto = {
.func = bpf_bprm_opts_set,
.gpl_only = false,
.ret_type = RET_INTEGER,
@@ -88,7 +88,7 @@ static bool bpf_ima_inode_hash_allowed(const struct bpf_prog *prog)

BTF_ID_LIST_SINGLE(bpf_ima_inode_hash_btf_ids, struct, inode)

const static struct bpf_func_proto bpf_ima_inode_hash_proto = {
static const struct bpf_func_proto bpf_ima_inode_hash_proto = {
.func = bpf_ima_inode_hash,
.gpl_only = false,
.ret_type = RET_INTEGER,
@@ -20,7 +20,7 @@

DEFINE_BPF_STORAGE_CACHE(task_cache);

DEFINE_PER_CPU(int, bpf_task_storage_busy);
static DEFINE_PER_CPU(int, bpf_task_storage_busy);

static void bpf_task_storage_lock(void)
{
@@ -329,7 +329,7 @@ bool dev_map_can_have_prog(struct bpf_map *map)
static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
{
struct net_device *dev = bq->dev;
int sent = 0, drops = 0, err = 0;
int sent = 0, err = 0;
int i;

if (unlikely(!bq->count))
@@ -343,29 +343,23 @@ static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)

sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
if (sent < 0) {
/* If ndo_xdp_xmit fails with an errno, no frames have
* been xmit'ed.
*/
err = sent;
sent = 0;
goto error;
}
drops = bq->count - sent;
out:
bq->count = 0;

trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, drops, err);
bq->dev_rx = NULL;
__list_del_clearprev(&bq->flush_node);
return;
error:
/* If ndo_xdp_xmit fails with an errno, no frames have been
* xmit'ed and it's our responsibility to them free all.
/* If not all frames have been transmitted, it is our
* responsibility to free them
*/
for (i = 0; i < bq->count; i++) {
struct xdp_frame *xdpf = bq->q[i];
for (i = sent; unlikely(i < bq->count); i++)
xdp_return_frame_rx_napi(bq->q[i]);

xdp_return_frame_rx_napi(xdpf);
drops++;
}
goto out;
trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, bq->count - sent, err);
bq->dev_rx = NULL;
bq->count = 0;
__list_del_clearprev(&bq->flush_node);
}

/* __dev_flush is called from xdp_do_flush() which _must_ be signaled
@@ -31,7 +31,7 @@

/*
* The bucket lock has two protection scopes:
*
* 1) Serializing concurrent operations from BPF programs on differrent
* 1) Serializing concurrent operations from BPF programs on different
* CPUs
*
* 2) Serializing concurrent operations from BPF programs and sys_bpf()
@@ -11912,7 +11912,6 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
struct bpf_insn insn_buf[16];
struct bpf_insn *patch = &insn_buf[0];
bool issrc, isneg;
u32 off_reg;
@@ -4729,6 +4729,9 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname,
sk->sk_prot->keepalive(sk, valbool);
sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
break;
case SO_REUSEPORT:
sk->sk_reuseport = valbool;
break;
default:
ret = -EINVAL;
}
@@ -4898,6 +4901,9 @@ static int _bpf_getsockopt(struct sock *sk, int level, int optname,
case SO_BINDTOIFINDEX:
*((int *)optval) = sk->sk_bound_dev_if;
break;
case SO_REUSEPORT:
*((int *)optval) = sk->sk_reuseport;
break;
default:
goto err_clear;
}
@@ -267,6 +267,7 @@
#include <linux/slab.h>
#include <linux/errqueue.h>
#include <linux/static_key.h>
#include <linux/btf.h>

#include <net/icmp.h>
#include <net/inet_common.h>
@@ -2587,6 +2588,17 @@ void tcp_set_state(struct sock *sk, int state)
BUILD_BUG_ON((int)BPF_TCP_NEW_SYN_RECV != (int)TCP_NEW_SYN_RECV);
BUILD_BUG_ON((int)BPF_TCP_MAX_STATES != (int)TCP_MAX_STATES);

/* bpf uapi header bpf.h defines an anonymous enum with values
* BPF_TCP_* used by bpf programs. Currently gcc built vmlinux
* is able to emit this enum in DWARF due to the above BUILD_BUG_ON.
* But clang built vmlinux does not have this enum in DWARF
* since clang removes the above code before generating IR/debuginfo.
* Let us explicitly emit the type debuginfo to ensure the
* above-mentioned anonymous enum in the vmlinux DWARF and hence BTF
* regardless of which compiler is used.
*/
BTF_TYPE_EMIT_ENUM(BPF_TCP_ESTABLISHED);

if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_STATE_CB_FLAG))
tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_STATE_CB, oldstate, state);
@@ -10,7 +10,7 @@
Usage() {
echo "Script for testing HBM (Host Bandwidth Manager) framework."
echo "It creates a cgroup to use for testing and load a BPF program to limit"
echo "egress or ingress bandwidht. It then uses iperf3 or netperf to create"
echo "egress or ingress bandwidth. It then uses iperf3 or netperf to create"
echo "loads. The output is the goodput in Mbps (unless -D was used)."
echo ""
echo "USAGE: $name [out] [-b=<prog>|--bpf=<prog>] [-c=<cc>|--cc=<cc>]"
@@ -14,16 +14,37 @@ SYNOPSIS

*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] }

*COMMAND* := { **skeleton** | **help** }
*COMMAND* := { **object** | **skeleton** | **help** }

GEN COMMANDS
=============

| **bpftool** **gen skeleton** *FILE*
| **bpftool** **gen object** *OUTPUT_FILE* *INPUT_FILE* [*INPUT_FILE*...]
| **bpftool** **gen skeleton** *FILE* [**name** *OBJECT_NAME*]
| **bpftool** **gen help**

DESCRIPTION
===========
**bpftool gen object** *OUTPUT_FILE* *INPUT_FILE* [*INPUT_FILE*...]
Statically link (combine) together one or more *INPUT_FILE*'s
into a single resulting *OUTPUT_FILE*. All the files involved
are BPF ELF object files.

The rules of BPF static linking are mostly the same as for
user-space object files, but in addition to combining data
and instruction sections, .BTF and .BTF.ext (if present in
any of the input files) data are combined together. .BTF
data is deduplicated, so all the common types across
*INPUT_FILE*'s will only be represented once in the resulting
BTF information.

BPF static linking allows to partition BPF source code into
individually compiled files that are then linked into
a single resulting BPF object file, which can be used to
generated BPF skeleton (with **gen skeleton** command) or
passed directly into **libbpf** (using **bpf_object__open()**
family of APIs).

**bpftool gen skeleton** *FILE*
Generate BPF skeleton C header file for a given *FILE*.

@@ -75,10 +96,13 @@ DESCRIPTION
specific maps, programs, etc.

As part of skeleton, few custom functions are generated.
Each of them is prefixed with object name, derived from
object file name. I.e., if BPF object file name is
**example.o**, BPF object name will be **example**. The
following custom functions are provided in such case:
Each of them is prefixed with object name. Object name can
either be derived from object file name, i.e., if BPF object
file name is **example.o**, BPF object name will be
**example**. Object name can be also specified explicitly
through **name** *OBJECT_NAME* parameter. The following
custom functions are provided (assuming **example** as
the object name):

- **example__open** and **example__open_opts**.
These functions are used to instantiate skeleton. It
@@ -130,26 +154,19 @@ OPTIONS

EXAMPLES
========
**$ cat example.c**
**$ cat example1.bpf.c**

::

#include <stdbool.h>
#include <linux/ptrace.h>
#include <linux/bpf.h>
#include "bpf_helpers.h"
#include <bpf/bpf_helpers.h>

const volatile int param1 = 42;
bool global_flag = true;
struct { int x; } data = {};

struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 128);
__type(key, int);
__type(value, long);
} my_map SEC(".maps");

SEC("raw_tp/sys_enter")
int handle_sys_enter(struct pt_regs *ctx)
{
@@ -161,6 +178,21 @@ EXAMPLES
return 0;
}

**$ cat example2.bpf.c**

::

#include <linux/ptrace.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 128);
__type(key, int);
__type(value, long);
} my_map SEC(".maps");

SEC("raw_tp/sys_exit")
int handle_sys_exit(struct pt_regs *ctx)
{
@@ -170,9 +202,17 @@ EXAMPLES
}

This is example BPF application with two BPF programs and a mix of BPF maps
and global variables.
and global variables. Source code is split across two source code files.

**$ bpftool gen skeleton example.o**
**$ clang -target bpf -g example1.bpf.c -o example1.bpf.o**
**$ clang -target bpf -g example2.bpf.c -o example2.bpf.o**
**$ bpftool gen object example.bpf.o example1.bpf.o example2.bpf.o**

This set of commands compiles *example1.bpf.c* and *example2.bpf.c*
individually and then statically links respective object files into the final
BPF ELF object file *example.bpf.o*.

**$ bpftool gen skeleton example.bpf.o name example | tee example.skel.h**

::

@@ -227,7 +267,7 @@ and global variables.

#endif /* __EXAMPLE_SKEL_H__ */

**$ cat example_user.c**
**$ cat example.c**

::

@@ -270,7 +310,7 @@ and global variables.
return err;
}

**# ./example_user**
**# ./example**

::

@@ -981,12 +981,25 @@ _bpftool()
;;
gen)
case $command in
skeleton)
object)
_filedir
return 0
;;
skeleton)
case $prev in
$command)
_filedir
return 0
;;
*)
_bpftool_once_attr 'name'
return 0
;;
esac
;;
*)
[[ $prev == $object ]] && \
COMPREPLY=( $( compgen -W 'skeleton help' -- "$cur" ) )
COMPREPLY=( $( compgen -W 'object skeleton help' -- "$cur" ) )
;;
esac
;;
@@ -546,6 +546,7 @@ static int do_dump(int argc, char **argv)
NEXT_ARG();
if (argc < 1) {
p_err("expecting value for 'format' option\n");
err = -EINVAL;
goto done;
}
if (strcmp(*argv, "c") == 0) {
@@ -555,11 +556,13 @@ static int do_dump(int argc, char **argv)
} else {
p_err("unrecognized format specifier: '%s', possible values: raw, c",
*argv);
err = -EINVAL;
goto done;
}
NEXT_ARG();
} else {
p_err("unrecognized option: '%s'", *argv);
err = -EINVAL;
goto done;
}
}
@@ -273,7 +273,7 @@ static int do_skeleton(int argc, char **argv)
char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
size_t i, map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz;
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
char obj_name[MAX_OBJ_NAME_LEN], *obj_data;
char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
struct bpf_object *obj = NULL;
const char *file, *ident;
struct bpf_program *prog;
@@ -288,6 +288,28 @@ static int do_skeleton(int argc, char **argv)
}
file = GET_ARG();

while (argc) {
if (!REQ_ARGS(2))
return -1;

if (is_prefix(*argv, "name")) {
NEXT_ARG();

if (obj_name[0] != '\0') {
p_err("object name already specified");
return -1;
}

strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
} else {
p_err("unknown arg %s", *argv);
return -1;
}

NEXT_ARG();
}

if (argc) {
p_err("extra unknown arguments");
return -1;
@@ -310,7 +332,8 @@ static int do_skeleton(int argc, char **argv)
p_err("failed to mmap() %s: %s", file, strerror(errno));
goto out;
}
get_obj_name(obj_name, file);
if (obj_name[0] == '\0')
get_obj_name(obj_name, file);
opts.object_name = obj_name;
obj = bpf_object__open_mem(obj_data, file_sz, &opts);
if (IS_ERR(obj)) {
@@ -591,6 +614,47 @@ out:
return err;
}

static int do_object(int argc, char **argv)
{
struct bpf_linker *linker;
const char *output_file, *file;
int err = 0;

if (!REQ_ARGS(2)) {
usage();
return -1;
}

output_file = GET_ARG();

linker = bpf_linker__new(output_file, NULL);
if (!linker) {
p_err("failed to create BPF linker instance");
return -1;
}

while (argc) {
file = GET_ARG();

err = bpf_linker__add_file(linker, file);
if (err) {
p_err("failed to link '%s': %s (%d)", file, strerror(err), err);
goto out;
}
}

err = bpf_linker__finalize(linker);
if (err) {
p_err("failed to finalize ELF file: %s (%d)", strerror(err), err);
goto out;
}

err = 0;
out:
bpf_linker__free(linker);
return err;
}

static int do_help(int argc, char **argv)
{
if (json_output) {
@@ -599,7 +663,8 @@ static int do_help(int argc, char **argv)
}

fprintf(stderr,
"Usage: %1$s %2$s skeleton FILE\n"
"Usage: %1$s %2$s object OUTPUT_FILE INPUT_FILE [INPUT_FILE...]\n"
" %1$s %2$s skeleton FILE [name OBJECT_NAME]\n"
" %1$s %2$s help\n"
"\n"
" " HELP_SPEC_OPTIONS "\n"
@@ -610,6 +675,7 @@ static int do_help(int argc, char **argv)
}

static const struct cmd cmds[] = {
{ "object", do_object },
{ "skeleton", do_skeleton },
{ "help", do_help },
{ 0 }
@@ -276,7 +276,7 @@ static int do_batch(int argc, char **argv)
int n_argc;
FILE *fp;
char *cp;
int err;
int err = 0;
int i;

if (argc < 2) {
@@ -370,7 +370,6 @@ static int do_batch(int argc, char **argv)
} else {
if (!json_output)
printf("processed %d commands\n", lines);
err = 0;
}
err_close:
if (fp != stdin)
@@ -100,7 +100,7 @@ static int do_dump_btf(const struct btf_dumper *d,
void *value)
{
__u32 value_id;
int ret;
int ret = 0;

/* start of key-value pair */
jsonw_start_object(d->jw);
@@ -1,3 +1,3 @@
libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \
netlink.o bpf_prog_linfo.o libbpf_probes.o xsk.o hashmap.o \
btf_dump.o ringbuf.o
btf_dump.o ringbuf.o strset.o linker.o
@@ -228,7 +228,6 @@ install_headers: $(BPF_HELPER_DEFS)
$(call do_install,bpf.h,$(prefix)/include/bpf,644); \
$(call do_install,libbpf.h,$(prefix)/include/bpf,644); \
$(call do_install,btf.h,$(prefix)/include/bpf,644); \
$(call do_install,libbpf_util.h,$(prefix)/include/bpf,644); \
$(call do_install,libbpf_common.h,$(prefix)/include/bpf,644); \
$(call do_install,xsk.h,$(prefix)/include/bpf,644); \
$(call do_install,bpf_helpers.h,$(prefix)/include/bpf,644); \
@@ -29,9 +29,10 @@
*/
#define SEC(NAME) __attribute__((section(NAME), used))

#ifndef __always_inline
/* Avoid 'linux/stddef.h' definition of '__always_inline'. */
#undef __always_inline
#define __always_inline inline __attribute__((always_inline))
#endif

#ifndef __noinline
#define __noinline __attribute__((noinline))
#endif
@@ -39,8 +40,22 @@
#define __weak __attribute__((weak))
#endif

/* When utilizing vmlinux.h with BPF CO-RE, user BPF programs can't include
* any system-level headers (such as stddef.h, linux/version.h, etc), and
* commonly-used macros like NULL and KERNEL_VERSION aren't available through
* vmlinux.h. This just adds unnecessary hurdles and forces users to re-define
* them on their own. So as a convenience, provide such definitions here.
*/
#ifndef NULL
#define NULL ((void *)0)
#endif

#ifndef KERNEL_VERSION
#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + ((c) > 255 ? 255 : (c))
#endif

/*
* Helper macro to manipulate data structures
* Helper macros to manipulate data structures
*/
#ifndef offsetof
#define offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER)
@ -21,6 +21,7 @@
|
||||
#include "libbpf.h"
|
||||
#include "libbpf_internal.h"
|
||||
#include "hashmap.h"
|
||||
#include "strset.h"
|
||||
|
||||
#define BTF_MAX_NR_TYPES 0x7fffffffU
|
||||
#define BTF_MAX_STR_OFFSET 0x7fffffffU
|
||||
@ -67,7 +68,7 @@ struct btf {
|
||||
* | | |
|
||||
* hdr | |
|
||||
* types_data----+ |
|
||||
* strs_data------------------+
|
||||
* strset__data(strs_set)-----+
|
||||
*
|
||||
* +----------+---------+-----------+
|
||||
* | Header | Types | Strings |
|
||||
@ -105,20 +106,15 @@ struct btf {
|
||||
*/
|
||||
int start_str_off;
|
||||
|
||||
/* only one of strs_data or strs_set can be non-NULL, depending on
|
||||
* whether BTF is in a modifiable state (strs_set is used) or not
|
||||
* (strs_data points inside raw_data)
|
||||
*/
|
||||
void *strs_data;
|
||||
size_t strs_data_cap; /* used size stored in hdr->str_len */
|
||||
|
||||
/* lookup index for each unique string in strings section */
|
||||
struct hashmap *strs_hash;
|
||||
/* a set of unique strings */
|
||||
struct strset *strs_set;
|
||||
/* whether strings are already deduplicated */
|
||||
bool strs_deduped;
|
||||
/* extra indirection layer to make strings hashmap work with stable
|
||||
* string offsets and ability to transparently choose between
|
||||
* btf->strs_data or btf_dedup->strs_data as a source of strings.
|
||||
* This is used for BTF strings dedup to transfer deduplicated strings
|
||||
* data back to struct btf without re-building strings index.
|
||||
*/
|
||||
void **strs_data_ptr;
|
||||
|
||||
/* BTF object FD, if loaded into kernel */
|
||||
int fd;
|
||||
@ -142,8 +138,8 @@ static inline __u64 ptr_to_u64(const void *ptr)
|
||||
* On success, memory pointer to the beginning of unused memory is returned.
|
||||
* On error, NULL is returned.
|
||||
*/
|
||||
void *btf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
|
||||
size_t cur_cnt, size_t max_cnt, size_t add_cnt)
|
||||
void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
|
||||
size_t cur_cnt, size_t max_cnt, size_t add_cnt)
|
||||
{
|
||||
size_t new_cnt;
|
||||
void *new_data;
|
||||
@ -179,14 +175,14 @@ void *btf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
|
||||
/* Ensure given dynamically allocated memory region has enough allocated space
|
||||
* to accommodate *need_cnt* elements of size *elem_sz* bytes each
|
||||
*/
|
||||
int btf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt)
|
||||
int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt)
|
||||
{
|
||||
void *p;
|
||||
|
||||
if (need_cnt <= *cap_cnt)
|
||||
return 0;
|
||||
|
||||
p = btf_add_mem(data, cap_cnt, elem_sz, *cap_cnt, SIZE_MAX, need_cnt - *cap_cnt);
|
||||
p = libbpf_add_mem(data, cap_cnt, elem_sz, *cap_cnt, SIZE_MAX, need_cnt - *cap_cnt);
|
||||
if (!p)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -197,8 +193,8 @@ static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off)
|
||||
{
|
||||
__u32 *p;
|
||||
|
||||
p = btf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32),
|
||||
btf->nr_types, BTF_MAX_NR_TYPES, 1);
|
||||
p = libbpf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32),
|
||||
btf->nr_types, BTF_MAX_NR_TYPES, 1);
|
||||
if (!p)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -435,7 +431,7 @@ const struct btf *btf__base_btf(const struct btf *btf)
|
||||
}
|
||||
|
||||
/* internal helper returning non-const pointer to a type */
|
||||
static struct btf_type *btf_type_by_id(struct btf *btf, __u32 type_id)
|
||||
struct btf_type *btf_type_by_id(struct btf *btf, __u32 type_id)
|
||||
{
|
||||
if (type_id == 0)
|
||||
return &btf_void;
|
||||
@ -738,7 +734,7 @@ void btf__free(struct btf *btf)
|
||||
*/
|
||||
free(btf->hdr);
|
||||
free(btf->types_data);
|
||||
free(btf->strs_data);
|
||||
strset__free(btf->strs_set);
|
||||
}
|
||||
free(btf->raw_data);
|
||||
free(btf->raw_data_swapped);
|
||||
@ -1246,6 +1242,11 @@ void btf__set_fd(struct btf *btf, int fd)
|
||||
btf->fd = fd;
|
||||
}
|
||||
|
||||
static const void *btf_strs_data(const struct btf *btf)
|
||||
{
|
||||
return btf->strs_data ? btf->strs_data : strset__data(btf->strs_set);
|
||||
}
|
||||
|
||||
static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian)
|
||||
{
|
||||
struct btf_header *hdr = btf->hdr;
|
||||
@ -1286,7 +1287,7 @@ static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endi
|
||||
}
|
||||
p += hdr->type_len;
|
||||
|
||||
memcpy(p, btf->strs_data, hdr->str_len);
|
||||
memcpy(p, btf_strs_data(btf), hdr->str_len);
|
||||
p += hdr->str_len;
|
||||
|
||||
*size = data_sz;
|
||||
@ -1320,7 +1321,7 @@ const char *btf__str_by_offset(const struct btf *btf, __u32 offset)
|
||||
if (offset < btf->start_str_off)
|
||||
return btf__str_by_offset(btf->base_btf, offset);
|
||||
else if (offset - btf->start_str_off < btf->hdr->str_len)
|
||||
return btf->strs_data + (offset - btf->start_str_off);
|
||||
return btf_strs_data(btf) + (offset - btf->start_str_off);
|
||||
else
|
||||
return NULL;
|
||||
}
|
||||
@ -1474,25 +1475,6 @@ int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static size_t strs_hash_fn(const void *key, void *ctx)
|
||||
{
|
||||
const struct btf *btf = ctx;
|
||||
const char *strs = *btf->strs_data_ptr;
|
||||
const char *str = strs + (long)key;
|
||||
|
||||
return str_hash(str);
|
||||
}
|
||||
|
||||
static bool strs_hash_equal_fn(const void *key1, const void *key2, void *ctx)
|
||||
{
|
||||
const struct btf *btf = ctx;
|
||||
const char *strs = *btf->strs_data_ptr;
|
||||
const char *str1 = strs + (long)key1;
|
||||
const char *str2 = strs + (long)key2;
|
||||
|
||||
return strcmp(str1, str2) == 0;
|
||||
}
|
||||
|
||||
static void btf_invalidate_raw_data(struct btf *btf)
|
||||
{
|
||||
if (btf->raw_data) {
|
||||
@ -1511,10 +1493,9 @@ static void btf_invalidate_raw_data(struct btf *btf)
|
||||
*/
|
||||
static int btf_ensure_modifiable(struct btf *btf)
|
||||
{
|
||||
void *hdr, *types, *strs, *strs_end, *s;
|
||||
struct hashmap *hash = NULL;
long off;
int err;
void *hdr, *types;
struct strset *set = NULL;
int err = -ENOMEM;

if (btf_is_modifiable(btf)) {
/* any BTF modification invalidates raw_data */
@@ -1525,44 +1506,25 @@ static int btf_ensure_modifiable(struct btf *btf)
/* split raw data into three memory regions */
hdr = malloc(btf->hdr->hdr_len);
types = malloc(btf->hdr->type_len);
strs = malloc(btf->hdr->str_len);
if (!hdr || !types || !strs)
if (!hdr || !types)
goto err_out;

memcpy(hdr, btf->hdr, btf->hdr->hdr_len);
memcpy(types, btf->types_data, btf->hdr->type_len);
memcpy(strs, btf->strs_data, btf->hdr->str_len);

/* make hashmap below use btf->strs_data as a source of strings */
btf->strs_data_ptr = &btf->strs_data;

/* build lookup index for all strings */
hash = hashmap__new(strs_hash_fn, strs_hash_equal_fn, btf);
if (IS_ERR(hash)) {
err = PTR_ERR(hash);
hash = NULL;
set = strset__new(BTF_MAX_STR_OFFSET, btf->strs_data, btf->hdr->str_len);
if (IS_ERR(set)) {
err = PTR_ERR(set);
goto err_out;
}

strs_end = strs + btf->hdr->str_len;
for (off = 0, s = strs; s < strs_end; off += strlen(s) + 1, s = strs + off) {
/* hashmap__add() returns EEXIST if a string with the same
* content is already in the hash map
*/
err = hashmap__add(hash, (void *)off, (void *)off);
if (err == -EEXIST)
continue; /* duplicate */
if (err)
goto err_out;
}

/* only when everything was successful, update internal state */
btf->hdr = hdr;
btf->types_data = types;
btf->types_data_cap = btf->hdr->type_len;
btf->strs_data = strs;
btf->strs_data_cap = btf->hdr->str_len;
btf->strs_hash = hash;
btf->strs_data = NULL;
btf->strs_set = set;
/* if BTF was created from scratch, all strings are guaranteed to be
* unique and deduplicated
*/
@@ -1577,17 +1539,10 @@ static int btf_ensure_modifiable(struct btf *btf)
return 0;

err_out:
hashmap__free(hash);
strset__free(set);
free(hdr);
free(types);
free(strs);
return -ENOMEM;
}

static void *btf_add_str_mem(struct btf *btf, size_t add_sz)
{
return btf_add_mem(&btf->strs_data, &btf->strs_data_cap, 1,
btf->hdr->str_len, BTF_MAX_STR_OFFSET, add_sz);
return err;
}

/* Find an offset in BTF string section that corresponds to a given string *s*.
@@ -1598,34 +1553,23 @@ static void *btf_add_str_mem(struct btf *btf, size_t add_sz)
*/
int btf__find_str(struct btf *btf, const char *s)
{
long old_off, new_off, len;
void *p;
int off;

if (btf->base_btf) {
int ret;

ret = btf__find_str(btf->base_btf, s);
if (ret != -ENOENT)
return ret;
off = btf__find_str(btf->base_btf, s);
if (off != -ENOENT)
return off;
}

/* BTF needs to be in a modifiable state to build string lookup index */
if (btf_ensure_modifiable(btf))
return -ENOMEM;

/* see btf__add_str() for why we do this */
len = strlen(s) + 1;
p = btf_add_str_mem(btf, len);
if (!p)
return -ENOMEM;
off = strset__find_str(btf->strs_set, s);
if (off < 0)
return off;

new_off = btf->hdr->str_len;
memcpy(p, s, len);

if (hashmap__find(btf->strs_hash, (void *)new_off, (void **)&old_off))
return btf->start_str_off + old_off;

return -ENOENT;
return btf->start_str_off + off;
}

/* Add a string s to the BTF string section.
@@ -1635,56 +1579,30 @@ int btf__find_str(struct btf *btf, const char *s)
*/
int btf__add_str(struct btf *btf, const char *s)
{
long old_off, new_off, len;
void *p;
int err;
int off;

if (btf->base_btf) {
int ret;

ret = btf__find_str(btf->base_btf, s);
if (ret != -ENOENT)
return ret;
off = btf__find_str(btf->base_btf, s);
if (off != -ENOENT)
return off;
}

if (btf_ensure_modifiable(btf))
return -ENOMEM;

/* Hashmap keys are always offsets within btf->strs_data, so to even
* look up some string from the "outside", we need to first append it
* at the end, so that it can be addressed with an offset. Luckily,
* until btf->hdr->str_len is incremented, that string is just a piece
* of garbage for the rest of BTF code, so no harm, no foul. On the
* other hand, if the string is unique, it's already appended and
* ready to be used, only a simple btf->hdr->str_len increment away.
*/
len = strlen(s) + 1;
p = btf_add_str_mem(btf, len);
if (!p)
return -ENOMEM;
off = strset__add_str(btf->strs_set, s);
if (off < 0)
return off;

new_off = btf->hdr->str_len;
memcpy(p, s, len);
btf->hdr->str_len = strset__data_size(btf->strs_set);

/* Now attempt to add the string, but only if the string with the same
* contents doesn't exist already (HASHMAP_ADD strategy). If such
* string exists, we'll get its offset in old_off (that's old_key).
*/
err = hashmap__insert(btf->strs_hash, (void *)new_off, (void *)new_off,
HASHMAP_ADD, (const void **)&old_off, NULL);
if (err == -EEXIST)
return btf->start_str_off + old_off; /* duplicated string, return existing offset */
if (err)
return err;

btf->hdr->str_len += len; /* new unique string, adjust data length */
return btf->start_str_off + new_off;
return btf->start_str_off + off;
}

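The append-first trick in the comment above is easy to get wrong when reimplementing. Below is a minimal, self-contained sketch of the same idea, with a linear scan standing in for the offset-keyed hashmap; the pool type, pool_reserve() and intern_str() names are illustrative only, not libbpf API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* grow-on-demand string pool; offsets into `data` identify strings */
struct pool { char *data; size_t len, cap; };

/* make room for add_sz bytes at the end without bumping pool->len yet,
 * mirroring what btf_add_str_mem() does for btf->strs_data
 */
static char *pool_reserve(struct pool *p, size_t add_sz)
{
	if (p->len + add_sz > p->cap) {
		size_t new_cap = (p->len + add_sz) * 2;
		char *d = realloc(p->data, new_cap);

		if (!d)
			return NULL;
		p->data = d;
		p->cap = new_cap;
	}
	return p->data + p->len;
}

/* append s tentatively, then search for an earlier copy; commit
 * pool->len only if the string turns out to be new
 */
static long intern_str(struct pool *p, const char *s)
{
	size_t len = strlen(s) + 1, off;
	char *dst = pool_reserve(p, len);

	if (!dst)
		return -1;
	memcpy(dst, s, len); /* harmless: invisible until len moves */

	for (off = 0; off < p->len; off += strlen(p->data + off) + 1)
		if (strcmp(p->data + off, s) == 0)
			return off; /* duplicate: appended bytes stay dead */

	off = p->len;
	p->len += len; /* unique: commit the tentative append */
	return off;
}

int main(void)
{
	struct pool p = {0};

	printf("%ld\n", intern_str(&p, "foo")); /* 0 */
	printf("%ld\n", intern_str(&p, "bar")); /* 4 */
	printf("%ld\n", intern_str(&p, "foo")); /* 0 again */
	free(p.data);
	return 0;
}

The real code replaces the linear scan with hashmap__insert(HASHMAP_ADD), whose -EEXIST result plays the role of the "duplicate" branch here.
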
static void *btf_add_type_mem(struct btf *btf, size_t add_sz)
{
return btf_add_mem(&btf->types_data, &btf->types_data_cap, 1,
btf->hdr->type_len, UINT_MAX, add_sz);
return libbpf_add_mem(&btf->types_data, &btf->types_data_cap, 1,
btf->hdr->type_len, UINT_MAX, add_sz);
}

static __u32 btf_type_info(int kind, int vlen, int kflag)
@@ -1711,6 +1629,54 @@ static int btf_commit_type(struct btf *btf, int data_sz)
return btf->start_id + btf->nr_types - 1;
}

struct btf_pipe {
const struct btf *src;
struct btf *dst;
};

static int btf_rewrite_str(__u32 *str_off, void *ctx)
{
struct btf_pipe *p = ctx;
int off;

if (!*str_off) /* nothing to do for empty strings */
return 0;

off = btf__add_str(p->dst, btf__str_by_offset(p->src, *str_off));
if (off < 0)
return off;

*str_off = off;
return 0;
}

int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type)
{
struct btf_pipe p = { .src = src_btf, .dst = btf };
struct btf_type *t;
int sz, err;

sz = btf_type_size(src_type);
if (sz < 0)
return sz;

/* deconstruct BTF, if necessary, and invalidate raw_data */
if (btf_ensure_modifiable(btf))
return -ENOMEM;

t = btf_add_type_mem(btf, sz);
if (!t)
return -ENOMEM;

memcpy(t, src_type, sz);

err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
if (err)
return err;

return btf_commit_type(btf, sz);
}

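The btf_pipe/btf_rewrite_str pair above is what lets btf__add_type() copy a type across BTF objects while re-interning its strings. A rough sketch of how the new public API might be driven from application code; the source type ID (1) is an assumption about the layout of src, and error handling is trimmed:

#include <errno.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>

/* copy type #1 out of one BTF object into a fresh one; string offsets
 * inside the copied type are rewritten against dst's string section
 * by the library
 */
static int copy_first_type(const struct btf *src)
{
	struct btf *dst = btf__new_empty();
	int id, err = 0;

	if (libbpf_get_error(dst))
		return -ENOMEM;

	id = btf__add_type(dst, src, btf__type_by_id(src, 1)); /* ID 1 assumed */
	if (id < 0)
		err = id;

	btf__free(dst);
	return err;
}
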
/*
* Append new BTF_KIND_INT type with:
* - *name* - non-empty, non-NULL type name;
@@ -3016,10 +2982,7 @@ struct btf_dedup {
/* Various options modifying the behavior of the algorithm */
struct btf_dedup_opts opts;
/* temporary strings deduplication state */
void *strs_data;
size_t strs_cap;
size_t strs_len;
struct hashmap *strs_hash;
struct strset *strs_set;
};

static long hash_combine(long h, long value)
@@ -3155,95 +3118,28 @@ done:
return d;
}

typedef int (*str_off_fn_t)(__u32 *str_off_ptr, void *ctx);

/*
* Iterate over all possible places in .BTF and .BTF.ext that can reference
* a string and pass a pointer to it to the provided callback `fn`.
*/
static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx)
static int btf_for_each_str_off(struct btf_dedup *d, str_off_visit_fn fn, void *ctx)
{
void *line_data_cur, *line_data_end;
int i, j, r, rec_size;
struct btf_type *t;
int i, r;

for (i = 0; i < d->btf->nr_types; i++) {
t = btf_type_by_id(d->btf, d->btf->start_id + i);
r = fn(&t->name_off, ctx);
struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);

r = btf_type_visit_str_offs(t, fn, ctx);
if (r)
return r;

switch (btf_kind(t)) {
case BTF_KIND_STRUCT:
case BTF_KIND_UNION: {
struct btf_member *m = btf_members(t);
__u16 vlen = btf_vlen(t);

for (j = 0; j < vlen; j++) {
r = fn(&m->name_off, ctx);
if (r)
return r;
m++;
}
break;
}
case BTF_KIND_ENUM: {
struct btf_enum *m = btf_enum(t);
__u16 vlen = btf_vlen(t);

for (j = 0; j < vlen; j++) {
r = fn(&m->name_off, ctx);
if (r)
return r;
m++;
}
break;
}
case BTF_KIND_FUNC_PROTO: {
struct btf_param *m = btf_params(t);
__u16 vlen = btf_vlen(t);

for (j = 0; j < vlen; j++) {
r = fn(&m->name_off, ctx);
if (r)
return r;
m++;
}
break;
}
default:
break;
}
}

if (!d->btf_ext)
return 0;

line_data_cur = d->btf_ext->line_info.info;
line_data_end = d->btf_ext->line_info.info + d->btf_ext->line_info.len;
rec_size = d->btf_ext->line_info.rec_size;

while (line_data_cur < line_data_end) {
struct btf_ext_info_sec *sec = line_data_cur;
struct bpf_line_info_min *line_info;
__u32 num_info = sec->num_info;

r = fn(&sec->sec_name_off, ctx);
if (r)
return r;

line_data_cur += sizeof(struct btf_ext_info_sec);
for (i = 0; i < num_info; i++) {
line_info = line_data_cur;
r = fn(&line_info->file_name_off, ctx);
if (r)
return r;
r = fn(&line_info->line_off, ctx);
if (r)
return r;
line_data_cur += rec_size;
}
}
r = btf_ext_visit_str_offs(d->btf_ext, fn, ctx);
if (r)
return r;

return 0;
}
@@ -3252,10 +3148,8 @@ static int strs_dedup_remap_str_off(__u32 *str_off_ptr, void *ctx)
{
struct btf_dedup *d = ctx;
__u32 str_off = *str_off_ptr;
long old_off, new_off, len;
const char *s;
void *p;
int err;
int off, err;

/* don't touch empty string or string in main BTF */
if (str_off == 0 || str_off < d->btf->start_str_off)
@@ -3272,29 +3166,11 @@ static int strs_dedup_remap_str_off(__u32 *str_off_ptr, void *ctx)
return err;
}

len = strlen(s) + 1;
off = strset__add_str(d->strs_set, s);
if (off < 0)
return off;

new_off = d->strs_len;
p = btf_add_mem(&d->strs_data, &d->strs_cap, 1, new_off, BTF_MAX_STR_OFFSET, len);
if (!p)
return -ENOMEM;

memcpy(p, s, len);

/* Now attempt to add the string, but only if the string with the same
* contents doesn't exist already (HASHMAP_ADD strategy). If such
* string exists, we'll get its offset in old_off (that's old_key).
*/
err = hashmap__insert(d->strs_hash, (void *)new_off, (void *)new_off,
HASHMAP_ADD, (const void **)&old_off, NULL);
if (err == -EEXIST) {
*str_off_ptr = d->btf->start_str_off + old_off;
} else if (err) {
return err;
} else {
*str_off_ptr = d->btf->start_str_off + new_off;
d->strs_len += len;
}
*str_off_ptr = d->btf->start_str_off + off;
return 0;
}

@@ -3311,39 +3187,23 @@ static int strs_dedup_remap_str_off(__u32 *str_off_ptr, void *ctx)
*/
static int btf_dedup_strings(struct btf_dedup *d)
{
char *s;
int err;

if (d->btf->strs_deduped)
return 0;

/* temporarily switch to use btf_dedup's strs_data for strings for hash
* functions; later we'll just transfer hashmap to struct btf as is,
* along the strs_data
*/
d->btf->strs_data_ptr = &d->strs_data;

d->strs_hash = hashmap__new(strs_hash_fn, strs_hash_equal_fn, d->btf);
if (IS_ERR(d->strs_hash)) {
err = PTR_ERR(d->strs_hash);
d->strs_hash = NULL;
d->strs_set = strset__new(BTF_MAX_STR_OFFSET, NULL, 0);
if (IS_ERR(d->strs_set)) {
err = PTR_ERR(d->strs_set);
goto err_out;
}

if (!d->btf->base_btf) {
s = btf_add_mem(&d->strs_data, &d->strs_cap, 1, d->strs_len, BTF_MAX_STR_OFFSET, 1);
if (!s)
return -ENOMEM;
/* initial empty string */
s[0] = 0;
d->strs_len = 1;

/* insert empty string; we won't be looking it up during strings
* dedup, but it's good to have it for generic BTF string lookups
*/
err = hashmap__insert(d->strs_hash, (void *)0, (void *)0,
HASHMAP_ADD, NULL, NULL);
if (err)
err = strset__add_str(d->strs_set, "");
if (err < 0)
goto err_out;
}

@@ -3353,28 +3213,16 @@ static int btf_dedup_strings(struct btf_dedup *d)
goto err_out;

/* replace BTF string data and hash with deduped ones */
free(d->btf->strs_data);
hashmap__free(d->btf->strs_hash);
d->btf->strs_data = d->strs_data;
d->btf->strs_data_cap = d->strs_cap;
d->btf->hdr->str_len = d->strs_len;
d->btf->strs_hash = d->strs_hash;
/* now point strs_data_ptr back to btf->strs_data */
d->btf->strs_data_ptr = &d->btf->strs_data;

d->strs_data = d->strs_hash = NULL;
d->strs_len = d->strs_cap = 0;
strset__free(d->btf->strs_set);
d->btf->hdr->str_len = strset__data_size(d->strs_set);
d->btf->strs_set = d->strs_set;
d->strs_set = NULL;
d->btf->strs_deduped = true;
return 0;

err_out:
free(d->strs_data);
hashmap__free(d->strs_hash);
d->strs_data = d->strs_hash = NULL;
d->strs_len = d->strs_cap = 0;

/* restore strings pointer for existing d->btf->strs_hash back */
d->btf->strs_data_ptr = &d->strs_data;
strset__free(d->strs_set);
d->strs_set = NULL;

return err;
}
@@ -4498,15 +4346,18 @@ static int btf_dedup_compact_types(struct btf_dedup *d)
* then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map,
* which is populated during compaction phase.
*/
static int btf_dedup_remap_type_id(struct btf_dedup *d, __u32 type_id)
static int btf_dedup_remap_type_id(__u32 *type_id, void *ctx)
{
struct btf_dedup *d = ctx;
__u32 resolved_type_id, new_type_id;

resolved_type_id = resolve_type_id(d, type_id);
resolved_type_id = resolve_type_id(d, *type_id);
new_type_id = d->hypot_map[resolved_type_id];
if (new_type_id > BTF_MAX_NR_TYPES)
return -EINVAL;
return new_type_id;

*type_id = new_type_id;
return 0;
}

/*
@@ -4519,109 +4370,25 @@ static int btf_dedup_remap_type_id(struct btf_dedup *d, __u32 type_id)
* referenced from any BTF type (e.g., struct fields, func proto args, etc) to
* their final deduped type IDs.
*/
static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id)
{
struct btf_type *t = btf_type_by_id(d->btf, type_id);
int i, r;

switch (btf_kind(t)) {
case BTF_KIND_INT:
case BTF_KIND_ENUM:
case BTF_KIND_FLOAT:
break;

case BTF_KIND_FWD:
case BTF_KIND_CONST:
case BTF_KIND_VOLATILE:
case BTF_KIND_RESTRICT:
case BTF_KIND_PTR:
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
case BTF_KIND_VAR:
r = btf_dedup_remap_type_id(d, t->type);
if (r < 0)
return r;
t->type = r;
break;

case BTF_KIND_ARRAY: {
struct btf_array *arr_info = btf_array(t);

r = btf_dedup_remap_type_id(d, arr_info->type);
if (r < 0)
return r;
arr_info->type = r;
r = btf_dedup_remap_type_id(d, arr_info->index_type);
if (r < 0)
return r;
arr_info->index_type = r;
break;
}

case BTF_KIND_STRUCT:
case BTF_KIND_UNION: {
struct btf_member *member = btf_members(t);
__u16 vlen = btf_vlen(t);

for (i = 0; i < vlen; i++) {
r = btf_dedup_remap_type_id(d, member->type);
if (r < 0)
return r;
member->type = r;
member++;
}
break;
}

case BTF_KIND_FUNC_PROTO: {
struct btf_param *param = btf_params(t);
__u16 vlen = btf_vlen(t);

r = btf_dedup_remap_type_id(d, t->type);
if (r < 0)
return r;
t->type = r;

for (i = 0; i < vlen; i++) {
r = btf_dedup_remap_type_id(d, param->type);
if (r < 0)
return r;
param->type = r;
param++;
}
break;
}

case BTF_KIND_DATASEC: {
struct btf_var_secinfo *var = btf_var_secinfos(t);
__u16 vlen = btf_vlen(t);

for (i = 0; i < vlen; i++) {
r = btf_dedup_remap_type_id(d, var->type);
if (r < 0)
return r;
var->type = r;
var++;
}
break;
}

default:
return -EINVAL;
}

return 0;
}

static int btf_dedup_remap_types(struct btf_dedup *d)
{
int i, r;

for (i = 0; i < d->btf->nr_types; i++) {
r = btf_dedup_remap_type(d, d->btf->start_id + i);
if (r < 0)
struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);

r = btf_type_visit_type_ids(t, btf_dedup_remap_type_id, d);
if (r)
return r;
}

if (!d->btf_ext)
return 0;

r = btf_ext_visit_type_ids(d->btf_ext, btf_dedup_remap_type_id, d);
if (r)
return r;

return 0;
}

@@ -4675,3 +4442,200 @@ struct btf *libbpf_find_kernel_btf(void)
pr_warn("failed to find valid kernel BTF\n");
return ERR_PTR(-ESRCH);
}

int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx)
{
int i, n, err;

switch (btf_kind(t)) {
case BTF_KIND_INT:
case BTF_KIND_FLOAT:
case BTF_KIND_ENUM:
return 0;

case BTF_KIND_FWD:
case BTF_KIND_CONST:
case BTF_KIND_VOLATILE:
case BTF_KIND_RESTRICT:
case BTF_KIND_PTR:
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
case BTF_KIND_VAR:
return visit(&t->type, ctx);

case BTF_KIND_ARRAY: {
struct btf_array *a = btf_array(t);

err = visit(&a->type, ctx);
err = err ?: visit(&a->index_type, ctx);
return err;
}

case BTF_KIND_STRUCT:
case BTF_KIND_UNION: {
struct btf_member *m = btf_members(t);

for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
err = visit(&m->type, ctx);
if (err)
return err;
}
return 0;
}

case BTF_KIND_FUNC_PROTO: {
struct btf_param *m = btf_params(t);

err = visit(&t->type, ctx);
if (err)
return err;
for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
err = visit(&m->type, ctx);
if (err)
return err;
}
return 0;
}

case BTF_KIND_DATASEC: {
struct btf_var_secinfo *m = btf_var_secinfos(t);

for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
err = visit(&m->type, ctx);
if (err)
return err;
}
return 0;
}

default:
return -EINVAL;
}
}

int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx)
{
int i, n, err;

err = visit(&t->name_off, ctx);
if (err)
return err;

switch (btf_kind(t)) {
case BTF_KIND_STRUCT:
case BTF_KIND_UNION: {
struct btf_member *m = btf_members(t);

for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
err = visit(&m->name_off, ctx);
if (err)
return err;
}
break;
}
case BTF_KIND_ENUM: {
struct btf_enum *m = btf_enum(t);

for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
err = visit(&m->name_off, ctx);
if (err)
return err;
}
break;
}
case BTF_KIND_FUNC_PROTO: {
struct btf_param *m = btf_params(t);

for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
err = visit(&m->name_off, ctx);
if (err)
return err;
}
break;
}
default:
break;
}

return 0;
}

int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx)
{
const struct btf_ext_info *seg;
struct btf_ext_info_sec *sec;
int i, err;

seg = &btf_ext->func_info;
for_each_btf_ext_sec(seg, sec) {
struct bpf_func_info_min *rec;

for_each_btf_ext_rec(seg, sec, i, rec) {
err = visit(&rec->type_id, ctx);
if (err < 0)
return err;
}
}

seg = &btf_ext->core_relo_info;
for_each_btf_ext_sec(seg, sec) {
struct bpf_core_relo *rec;

for_each_btf_ext_rec(seg, sec, i, rec) {
err = visit(&rec->type_id, ctx);
if (err < 0)
return err;
}
}

return 0;
}

int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx)
{
const struct btf_ext_info *seg;
struct btf_ext_info_sec *sec;
int i, err;

seg = &btf_ext->func_info;
for_each_btf_ext_sec(seg, sec) {
err = visit(&sec->sec_name_off, ctx);
if (err)
return err;
}

seg = &btf_ext->line_info;
for_each_btf_ext_sec(seg, sec) {
struct bpf_line_info_min *rec;

err = visit(&sec->sec_name_off, ctx);
if (err)
return err;

for_each_btf_ext_rec(seg, sec, i, rec) {
err = visit(&rec->file_name_off, ctx);
if (err)
return err;
err = visit(&rec->line_off, ctx);
if (err)
return err;
}
}

seg = &btf_ext->core_relo_info;
for_each_btf_ext_sec(seg, sec) {
struct bpf_core_relo *rec;

err = visit(&sec->sec_name_off, ctx);
if (err)
return err;

for_each_btf_ext_rec(seg, sec, i, rec) {
err = visit(&rec->access_str_off, ctx);
if (err)
return err;
}
}

return 0;
}

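Any callback matching type_id_visit_fn can be plugged into these walkers; it receives a mutable pointer so that remapping visitors can rewrite IDs in place. As a hypothetical illustration (not part of this patch), a visitor that only counts references to one type ID might look like this:

#include <linux/types.h>

struct count_ctx {
	__u32 needle;
	int hits;
};

/* matches type_id_visit_fn; returning non-zero would abort the walk */
static int count_type_ref(__u32 *type_id, void *ctx)
{
	struct count_ctx *c = ctx;

	if (*type_id == c->needle)
		c->hits++;
	return 0;
}

/* usage sketch:
 *	struct count_ctx c = { .needle = 42 };
 *
 *	err = btf_type_visit_type_ids(t, count_type_ref, &c);
 */
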
@@ -93,6 +93,8 @@ LIBBPF_API struct btf *libbpf_find_kernel_btf(void);

LIBBPF_API int btf__find_str(struct btf *btf, const char *s);
LIBBPF_API int btf__add_str(struct btf *btf, const char *s);
LIBBPF_API int btf__add_type(struct btf *btf, const struct btf *src_btf,
const struct btf_type *src_type);

LIBBPF_API int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding);
LIBBPF_API int btf__add_float(struct btf *btf, const char *name, size_t byte_sz);
@@ -174,6 +176,7 @@ struct btf_dump_emit_type_decl_opts {
int indent_level;
/* strip all the const/volatile/restrict mods */
bool strip_mods;
size_t :0;
};
#define btf_dump_emit_type_decl_opts__last_field strip_mods

@@ -166,11 +166,11 @@ static int btf_dump_resize(struct btf_dump *d)
if (last_id <= d->last_id)
return 0;

if (btf_ensure_mem((void **)&d->type_states, &d->type_states_cap,
sizeof(*d->type_states), last_id + 1))
if (libbpf_ensure_mem((void **)&d->type_states, &d->type_states_cap,
sizeof(*d->type_states), last_id + 1))
return -ENOMEM;
if (btf_ensure_mem((void **)&d->cached_names, &d->cached_names_cap,
sizeof(*d->cached_names), last_id + 1))
if (libbpf_ensure_mem((void **)&d->cached_names, &d->cached_names_cap,
sizeof(*d->cached_names), last_id + 1))
return -ENOMEM;

if (d->last_id == 0) {
@@ -55,10 +55,6 @@
#include "libbpf_internal.h"
#include "hashmap.h"

#ifndef EM_BPF
#define EM_BPF 247
#endif

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
#endif
@@ -1134,11 +1130,6 @@ static void bpf_object__elf_finish(struct bpf_object *obj)
obj->efile.obj_buf_sz = 0;
}

/* if libelf is old and doesn't support mmap(), fall back to read() */
#ifndef ELF_C_READ_MMAP
#define ELF_C_READ_MMAP ELF_C_READ
#endif

static int bpf_object__elf_init(struct bpf_object *obj)
{
int err = 0;
@@ -2808,7 +2799,7 @@ static bool ignore_elf_section(GElf_Shdr *hdr, const char *name)
return true;

/* ignore .llvm_addrsig section as well */
if (hdr->sh_type == 0x6FFF4C03 /* SHT_LLVM_ADDRSIG */)
if (hdr->sh_type == SHT_LLVM_ADDRSIG)
return true;

/* no subprograms will lead to an empty .text section, ignore it */
@@ -4868,8 +4859,8 @@ static int load_module_btfs(struct bpf_object *obj)
goto err_out;
}

err = btf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
if (err)
goto err_out;

@@ -507,6 +507,7 @@ struct xdp_link_info {
struct bpf_xdp_set_link_opts {
size_t sz;
int old_fd;
size_t :0;
};
#define bpf_xdp_set_link_opts__last_field old_fd

@@ -759,6 +760,19 @@ enum libbpf_tristate {
TRI_MODULE = 2,
};

struct bpf_linker_opts {
/* size of this struct, for forward/backward compatibility */
size_t sz;
};
#define bpf_linker_opts__last_field sz

struct bpf_linker;

LIBBPF_API struct bpf_linker *bpf_linker__new(const char *filename, struct bpf_linker_opts *opts);
LIBBPF_API int bpf_linker__add_file(struct bpf_linker *linker, const char *filename);
LIBBPF_API int bpf_linker__finalize(struct bpf_linker *linker);
LIBBPF_API void bpf_linker__free(struct bpf_linker *linker);

#ifdef __cplusplus
} /* extern "C" */
#endif
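A plausible minimal use of the new static-linking API, with placeholder file names ("a.o", "b.o", "out.o") and simplified error handling (the exact error convention of bpf_linker__new() is an assumption here):

#include <errno.h>
#include <bpf/libbpf.h>

/* link two BPF object files into one output object */
static int link_two_objects(void)
{
	struct bpf_linker *linker;
	int err;

	linker = bpf_linker__new("out.o", NULL); /* NULL: default opts */
	if (!linker)
		return -ENOMEM; /* assumed failure convention */

	err = bpf_linker__add_file(linker, "a.o");
	if (!err)
		err = bpf_linker__add_file(linker, "b.o");
	if (!err)
		err = bpf_linker__finalize(linker); /* writes out.o */

	bpf_linker__free(linker);
	return err;
}
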
@@ -354,4 +354,9 @@ LIBBPF_0.3.0 {
LIBBPF_0.4.0 {
global:
btf__add_float;
btf__add_type;
bpf_linker__add_file;
bpf_linker__finalize;
bpf_linker__free;
bpf_linker__new;
} LIBBPF_0.3.0;
@@ -20,6 +20,26 @@

#include "libbpf.h"

#ifndef EM_BPF
#define EM_BPF 247
#endif

#ifndef R_BPF_64_64
#define R_BPF_64_64 1
#endif
#ifndef R_BPF_64_32
#define R_BPF_64_32 10
#endif

#ifndef SHT_LLVM_ADDRSIG
#define SHT_LLVM_ADDRSIG 0x6FFF4C03
#endif

/* if libelf is old and doesn't support mmap(), fall back to read() */
#ifndef ELF_C_READ_MMAP
#define ELF_C_READ_MMAP ELF_C_READ
#endif

#define BTF_INFO_ENC(kind, kind_flag, vlen) \
((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
#define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type)
@@ -107,9 +127,14 @@ static inline void *libbpf_reallocarray(void *ptr, size_t nmemb, size_t size)
return realloc(ptr, total);
}

void *btf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
size_t cur_cnt, size_t max_cnt, size_t add_cnt);
int btf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt);
struct btf;
struct btf_type;

struct btf_type *btf_type_by_id(struct btf *btf, __u32 type_id);

void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
size_t cur_cnt, size_t max_cnt, size_t add_cnt);
int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt);

static inline bool libbpf_validate_opts(const char *opts,
size_t opts_sz, size_t user_sz,
@@ -351,4 +376,11 @@ struct bpf_core_relo {
enum bpf_core_relo_kind kind;
};

typedef int (*type_id_visit_fn)(__u32 *type_id, void *ctx);
typedef int (*str_off_visit_fn)(__u32 *str_off, void *ctx);
int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx);
int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx);
int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx);
int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx);

#endif /* __LIBBPF_LIBBPF_INTERNAL_H */

@@ -1,75 +0,0 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
/* Copyright (c) 2019 Facebook */

#ifndef __LIBBPF_LIBBPF_UTIL_H
#define __LIBBPF_LIBBPF_UTIL_H

#include <stdbool.h>
#include <linux/compiler.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Use these barrier functions instead of smp_[rw]mb() when they are
* used in a libbpf header file. That way they can be built into the
* application that uses libbpf.
*/
#if defined(__i386__) || defined(__x86_64__)
# define libbpf_smp_store_release(p, v) \
do { \
asm volatile("" : : : "memory"); \
WRITE_ONCE(*p, v); \
} while (0)
# define libbpf_smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = READ_ONCE(*p); \
asm volatile("" : : : "memory"); \
___p1; \
})
#elif defined(__aarch64__)
# define libbpf_smp_store_release(p, v) \
asm volatile ("stlr %w1, %0" : "=Q" (*p) : "r" (v) : "memory")
# define libbpf_smp_load_acquire(p) \
({ \
typeof(*p) ___p1; \
asm volatile ("ldar %w0, %1" \
: "=r" (___p1) : "Q" (*p) : "memory"); \
___p1; \
})
#elif defined(__riscv)
# define libbpf_smp_store_release(p, v) \
do { \
asm volatile ("fence rw,w" : : : "memory"); \
WRITE_ONCE(*p, v); \
} while (0)
# define libbpf_smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = READ_ONCE(*p); \
asm volatile ("fence r,rw" : : : "memory"); \
___p1; \
})
#endif

#ifndef libbpf_smp_store_release
#define libbpf_smp_store_release(p, v) \
do { \
__sync_synchronize(); \
WRITE_ONCE(*p, v); \
} while (0)
#endif

#ifndef libbpf_smp_load_acquire
#define libbpf_smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = READ_ONCE(*p); \
__sync_synchronize(); \
___p1; \
})
#endif

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif
tools/lib/bpf/linker.c (new file, 1944 lines; diff suppressed because it is too large)
tools/lib/bpf/strset.c (new file, 176 lines)
@@ -0,0 +1,176 @@
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2021 Facebook */
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <linux/err.h>
#include "hashmap.h"
#include "libbpf_internal.h"
#include "strset.h"

struct strset {
void *strs_data;
size_t strs_data_len;
size_t strs_data_cap;
size_t strs_data_max_len;

/* lookup index for each unique string in strings set */
struct hashmap *strs_hash;
};

static size_t strset_hash_fn(const void *key, void *ctx)
{
const struct strset *s = ctx;
const char *str = s->strs_data + (long)key;

return str_hash(str);
}

static bool strset_equal_fn(const void *key1, const void *key2, void *ctx)
{
const struct strset *s = ctx;
const char *str1 = s->strs_data + (long)key1;
const char *str2 = s->strs_data + (long)key2;

return strcmp(str1, str2) == 0;
}

struct strset *strset__new(size_t max_data_sz, const char *init_data, size_t init_data_sz)
{
struct strset *set = calloc(1, sizeof(*set));
struct hashmap *hash;
int err = -ENOMEM;

if (!set)
return ERR_PTR(-ENOMEM);

hash = hashmap__new(strset_hash_fn, strset_equal_fn, set);
if (IS_ERR(hash))
goto err_out;

set->strs_data_max_len = max_data_sz;
set->strs_hash = hash;

if (init_data) {
long off;

set->strs_data = malloc(init_data_sz);
if (!set->strs_data)
goto err_out;

memcpy(set->strs_data, init_data, init_data_sz);
set->strs_data_len = init_data_sz;
set->strs_data_cap = init_data_sz;

for (off = 0; off < set->strs_data_len; off += strlen(set->strs_data + off) + 1) {
/* hashmap__add() returns EEXIST if a string with the same
* content is already in the hash map
*/
err = hashmap__add(hash, (void *)off, (void *)off);
if (err == -EEXIST)
continue; /* duplicate */
if (err)
goto err_out;
}
}

return set;
err_out:
strset__free(set);
return ERR_PTR(err);
}

void strset__free(struct strset *set)
{
if (IS_ERR_OR_NULL(set))
return;

hashmap__free(set->strs_hash);
free(set->strs_data);
}

size_t strset__data_size(const struct strset *set)
{
return set->strs_data_len;
}

const char *strset__data(const struct strset *set)
{
return set->strs_data;
}

static void *strset_add_str_mem(struct strset *set, size_t add_sz)
{
return libbpf_add_mem(&set->strs_data, &set->strs_data_cap, 1,
set->strs_data_len, set->strs_data_max_len, add_sz);
}

/* Find string offset that corresponds to a given string *s*.
* Returns:
* - >0 offset into string data, if string is found;
* - -ENOENT, if string is not in the string data;
* - <0, on any other error.
*/
int strset__find_str(struct strset *set, const char *s)
{
long old_off, new_off, len;
void *p;

/* see strset__add_str() for why we do this */
len = strlen(s) + 1;
p = strset_add_str_mem(set, len);
if (!p)
return -ENOMEM;

new_off = set->strs_data_len;
memcpy(p, s, len);

if (hashmap__find(set->strs_hash, (void *)new_off, (void **)&old_off))
return old_off;

return -ENOENT;
}

/* Add a string s to the string data. If the string already exists, return its
* offset within string data.
* Returns:
* - > 0 offset into string data, on success;
* - < 0, on error.
*/
int strset__add_str(struct strset *set, const char *s)
{
long old_off, new_off, len;
void *p;
int err;

/* Hashmap keys are always offsets within set->strs_data, so to even
* look up some string from the "outside", we need to first append it
* at the end, so that it can be addressed with an offset. Luckily,
* until set->strs_data_len is incremented, that string is just a piece
* of garbage for the rest of the code, so no harm, no foul. On the
* other hand, if the string is unique, it's already appended and
* ready to be used, only a simple set->strs_data_len increment away.
*/
len = strlen(s) + 1;
p = strset_add_str_mem(set, len);
if (!p)
return -ENOMEM;

new_off = set->strs_data_len;
memcpy(p, s, len);

/* Now attempt to add the string, but only if the string with the same
* contents doesn't exist already (HASHMAP_ADD strategy). If such
* string exists, we'll get its offset in old_off (that's old_key).
*/
err = hashmap__insert(set->strs_hash, (void *)new_off, (void *)new_off,
HASHMAP_ADD, (const void **)&old_off, NULL);
if (err == -EEXIST)
return old_off; /* duplicated string, return existing offset */
if (err)
return err;

set->strs_data_len += len; /* new unique string, adjust data length */
return new_off;
}
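To make the strset contract concrete, here is a small hypothetical driver of the internal API; the 16 KB size cap and the seed blob are arbitrary choices for illustration, and the offsets in the comments follow from that blob:

#include <errno.h>
#include <linux/err.h>
#include "strset.h"

static int strset_demo(void)
{
	/* seed blob bytes: { '\0', 'f', 'o', 'o', '\0' }, so offset 0 is ""
	 * and offset 1 is "foo"
	 */
	static const char init[] = "\0foo";
	struct strset *set;
	int off;

	set = strset__new(16 * 1024, init, sizeof(init));
	if (IS_ERR(set))
		return PTR_ERR(set);

	off = strset__add_str(set, "foo");	/* duplicate: returns 1 */
	off = strset__add_str(set, "bar");	/* new: returns 5 (sizeof(init)) */
	off = strset__find_str(set, "baz");	/* not present: -ENOENT */

	strset__free(set);
	return off == -ENOENT ? 0 : off;
}
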
tools/lib/bpf/strset.h (new file, 21 lines)
@@ -0,0 +1,21 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */

/* Copyright (c) 2021 Facebook */
#ifndef __LIBBPF_STRSET_H
#define __LIBBPF_STRSET_H

#include <stdbool.h>
#include <stddef.h>

struct strset;

struct strset *strset__new(size_t max_data_sz, const char *init_data, size_t init_data_sz);
void strset__free(struct strset *set);

const char *strset__data(const struct strset *set);
size_t strset__data_size(const struct strset *set);

int strset__find_str(struct strset *set, const char *s);
int strset__add_str(struct strset *set, const char *s);

#endif /* __LIBBPF_STRSET_H */
@@ -3,7 +3,8 @@
/*
* AF_XDP user-space access library.
*
* Copyright(c) 2018 - 2019 Intel Corporation.
* Copyright (c) 2018 - 2019 Intel Corporation.
* Copyright (c) 2019 Facebook
*
* Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
*/
@@ -13,15 +14,80 @@

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <linux/if_xdp.h>

#include "libbpf.h"
#include "libbpf_util.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Load-Acquire Store-Release barriers used by the XDP socket
* library. The following macros should *NOT* be considered part of
* the xsk.h API, and are subject to change at any time.
*
* LIBRARY INTERNAL
*/

#define __XSK_READ_ONCE(x) (*(volatile typeof(x) *)&x)
#define __XSK_WRITE_ONCE(x, v) (*(volatile typeof(x) *)&x) = (v)

#if defined(__i386__) || defined(__x86_64__)
# define libbpf_smp_store_release(p, v) \
do { \
asm volatile("" : : : "memory"); \
__XSK_WRITE_ONCE(*p, v); \
} while (0)
# define libbpf_smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = __XSK_READ_ONCE(*p); \
asm volatile("" : : : "memory"); \
___p1; \
})
#elif defined(__aarch64__)
# define libbpf_smp_store_release(p, v) \
asm volatile ("stlr %w1, %0" : "=Q" (*p) : "r" (v) : "memory")
# define libbpf_smp_load_acquire(p) \
({ \
typeof(*p) ___p1; \
asm volatile ("ldar %w0, %1" \
: "=r" (___p1) : "Q" (*p) : "memory"); \
___p1; \
})
#elif defined(__riscv)
# define libbpf_smp_store_release(p, v) \
do { \
asm volatile ("fence rw,w" : : : "memory"); \
__XSK_WRITE_ONCE(*p, v); \
} while (0)
# define libbpf_smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = __XSK_READ_ONCE(*p); \
asm volatile ("fence r,rw" : : : "memory"); \
___p1; \
})
#endif

#ifndef libbpf_smp_store_release
#define libbpf_smp_store_release(p, v) \
do { \
__sync_synchronize(); \
__XSK_WRITE_ONCE(*p, v); \
} while (0)
#endif

#ifndef libbpf_smp_load_acquire
#define libbpf_smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = __XSK_READ_ONCE(*p); \
__sync_synchronize(); \
___p1; \
})
#endif

/* LIBRARY INTERNAL -- END */

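These barriers are meant to be used in acquire/release pairs on the ring indices. A minimal sketch of a consumer-side pop, where RING_SIZE, prod, cons and ring are stand-ins for the real xsk ring fields rather than this header's API:

#include <linux/types.h>

#define RING_SIZE 256 /* hypothetical; real code uses the ring's size field */

static inline int ring_pop(__u32 *prod, __u32 *cons, __u64 *ring, __u64 *val)
{
	/* pairs with the producer's store-release of *prod: if we observe
	 * the new producer index, we also observe the entry it published
	 */
	__u32 p = libbpf_smp_load_acquire(prod);

	if (*cons == p)
		return -1; /* ring is empty */
	*val = ring[*cons & (RING_SIZE - 1)];
	/* release the slot only after the data has been read */
	libbpf_smp_store_release(cons, *cons + 1);
	return 0;
}
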
/* Do not access these members directly. Use the functions below. */
#define DEFINE_XSK_RING(name) \
struct name { \
@@ -21,7 +21,7 @@ endif

BPF_GCC ?= $(shell command -v bpf-gcc;)
SAN_CFLAGS ?=
CFLAGS += -g -rdynamic -Wall -O2 $(GENFLAGS) $(SAN_CFLAGS) \
CFLAGS += -g -Og -rdynamic -Wall $(GENFLAGS) $(SAN_CFLAGS) \
-I$(CURDIR) -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \
-I$(TOOLSINCDIR) -I$(APIDIR) -I$(OUTPUT) \
-Dbpf_prog_load=bpf_prog_test_load \
@@ -201,6 +201,7 @@ $(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
$(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/bpftool
$(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \
CC=$(HOSTCC) LD=$(HOSTLD) \
EXTRA_CFLAGS='-g -Og' \
OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \
prefix= DESTDIR=$(HOST_SCRATCH_DIR)/ install

@@ -218,6 +219,7 @@ $(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
../../../include/uapi/linux/bpf.h \
| $(INCLUDE_DIR) $(BUILD_DIR)/libbpf
$(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(BUILD_DIR)/libbpf/ \
EXTRA_CFLAGS='-g -Og' \
DESTDIR=$(SCRATCH_DIR) prefix= all install_headers

ifneq ($(BPFOBJ),$(HOST_BPFOBJ))
@@ -225,11 +227,12 @@ $(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
../../../include/uapi/linux/bpf.h \
| $(INCLUDE_DIR) $(HOST_BUILD_DIR)/libbpf
$(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) \
OUTPUT=$(HOST_BUILD_DIR)/libbpf/ CC=$(HOSTCC) LD=$(HOSTLD) \
EXTRA_CFLAGS='-g -Og' \
OUTPUT=$(HOST_BUILD_DIR)/libbpf/ CC=$(HOSTCC) LD=$(HOSTLD) \
DESTDIR=$(HOST_SCRATCH_DIR)/ prefix= all install_headers
endif

$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) | $(BPFTOOL) $(INCLUDE_DIR)
$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL) | $(INCLUDE_DIR)
ifeq ($(VMLINUX_H),)
$(call msg,GEN,,$@)
$(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
@@ -300,6 +303,10 @@ endef

SKEL_BLACKLIST := btf__% test_pinning_invalid.c test_sk_assign.c

LINKED_SKELS := test_static_linked.skel.h

test_static_linked.skel.h-deps := test_static_linked1.o test_static_linked2.o

# Set up extra TRUNNER_XXX "temporary" variables in the environment (relies on
# $eval()) and pass control to DEFINE_TEST_RUNNER_RULES.
# Parameters:
@@ -320,6 +327,7 @@ TRUNNER_BPF_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.o, $$(TRUNNER_BPF_SRCS)
TRUNNER_BPF_SKELS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.skel.h, \
$$(filter-out $(SKEL_BLACKLIST), \
$$(TRUNNER_BPF_SRCS)))
TRUNNER_BPF_SKELS_LINKED := $$(addprefix $$(TRUNNER_OUTPUT)/,$(LINKED_SKELS))
TEST_GEN_FILES += $$(TRUNNER_BPF_OBJS)

# Evaluate rules now with extra TRUNNER_XXX variables above already defined
@@ -352,11 +360,22 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.o: \
$$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@, \
$(TRUNNER_BPF_CFLAGS))

$(TRUNNER_BPF_SKELS): $(TRUNNER_OUTPUT)/%.skel.h: \
$(TRUNNER_OUTPUT)/%.o \
| $(BPFTOOL) $(TRUNNER_OUTPUT)
$(TRUNNER_BPF_SKELS): %.skel.h: %.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
$$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
$(Q)$$(BPFTOOL) gen skeleton $$< > $$@
$(Q)$$(BPFTOOL) gen object $$(<:.o=.linked1.o) $$<
$(Q)$$(BPFTOOL) gen object $$(<:.o=.linked2.o) $$(<:.o=.linked1.o)
$(Q)$$(BPFTOOL) gen object $$(<:.o=.linked3.o) $$(<:.o=.linked2.o)
$(Q)diff $$(<:.o=.linked2.o) $$(<:.o=.linked3.o)
$(Q)$$(BPFTOOL) gen skeleton $$(<:.o=.linked3.o) name $$(notdir $$(<:.o=)) > $$@

$(TRUNNER_BPF_SKELS_LINKED): $(TRUNNER_BPF_OBJS) $(BPFTOOL) | $(TRUNNER_OUTPUT)
$$(call msg,LINK-BPF,$(TRUNNER_BINARY),$$(@:.skel.h=.o))
$(Q)$$(BPFTOOL) gen object $$(@:.skel.h=.linked1.o) $$(addprefix $(TRUNNER_OUTPUT)/,$$($$(@F)-deps))
$(Q)$$(BPFTOOL) gen object $$(@:.skel.h=.linked2.o) $$(@:.skel.h=.linked1.o)
$(Q)$$(BPFTOOL) gen object $$(@:.skel.h=.linked3.o) $$(@:.skel.h=.linked2.o)
$(Q)diff $$(@:.skel.h=.linked2.o) $$(@:.skel.h=.linked3.o)
$$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
$(Q)$$(BPFTOOL) gen skeleton $$(@:.skel.h=.linked3.o) name $$(notdir $$(@:.skel.h=)) > $$@
endif

# ensure we set up tests.h header generation rule just once
@@ -378,6 +397,7 @@ $(TRUNNER_TEST_OBJS): $(TRUNNER_OUTPUT)/%.test.o: \
$(TRUNNER_EXTRA_HDRS) \
$(TRUNNER_BPF_OBJS) \
$(TRUNNER_BPF_SKELS) \
$(TRUNNER_BPF_SKELS_LINKED) \
$$(BPFOBJ) | $(TRUNNER_OUTPUT)
$$(call msg,TEST-OBJ,$(TRUNNER_BINARY),$$@)
$(Q)cd $$(@D) && $$(CC) -I. $$(CFLAGS) -c $(CURDIR)/$$< $$(LDLIBS) -o $$(@F)
@@ -57,6 +57,10 @@ int main(int argc, char **argv)
__u32 key = 0, pid;
int exit_code = 1;
char buf[256];
const struct timespec req = {
.tv_sec = 1,
.tv_nsec = 0,
};

cgroup_fd = cgroup_setup_and_join(TEST_CGROUP);
if (CHECK(cgroup_fd < 0, "cgroup_setup_and_join", "err %d errno %d\n", cgroup_fd, errno))
@@ -115,7 +119,7 @@ int main(int argc, char **argv)
goto close_pmu;

/* trigger some syscalls */
sleep(1);
syscall(__NR_nanosleep, &req, NULL);

err = bpf_map_lookup_elem(cgidmap_fd, &key, &kcgid);
if (CHECK(err, "bpf_map_lookup_elem", "err %d errno %d\n", err, errno))
@@ -55,7 +55,6 @@ void test_array_map_batch_ops(void)
int map_fd, *keys, *values, *visited;
__u32 count, total, total_success;
const __u32 max_entries = 10;
bool nospace_err;
__u64 batch = 0;
int err, step;
DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
@@ -90,7 +89,6 @@ void test_array_map_batch_ops(void)
* elements each.
*/
count = step;
nospace_err = false;
while (true) {
err = bpf_map_lookup_batch(map_fd,
total ? &batch : NULL, &batch,
@@ -107,9 +105,6 @@ void test_array_map_batch_ops(void)

}

if (nospace_err == true)
continue;

CHECK(total != max_entries, "lookup with steps",
"total = %u, max_entries = %u\n", total, max_entries);

tools/testing/selftests/bpf/prog_tests/static_linked.c (new file, 40 lines)
@@ -0,0 +1,40 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */

#include <test_progs.h>
#include "test_static_linked.skel.h"

void test_static_linked(void)
{
int err;
struct test_static_linked* skel;

skel = test_static_linked__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;

skel->rodata->rovar1 = 1;
skel->bss->static_var1 = 2;
skel->bss->static_var11 = 3;

skel->rodata->rovar2 = 4;
skel->bss->static_var2 = 5;
skel->bss->static_var22 = 6;

err = test_static_linked__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto cleanup;

err = test_static_linked__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto cleanup;

/* trigger */
usleep(1);

ASSERT_EQ(skel->bss->var1, 1 * 2 + 2 + 3, "var1");
ASSERT_EQ(skel->bss->var2, 4 * 3 + 5 + 6, "var2");

cleanup:
test_static_linked__destroy(skel);
}
@@ -57,6 +57,27 @@ static __inline int bind_to_device(struct bpf_sock_addr *ctx)
return 0;
}

static __inline int bind_reuseport(struct bpf_sock_addr *ctx)
{
int val = 1;

if (bpf_setsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
&val, sizeof(val)))
return 1;
if (bpf_getsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
&val, sizeof(val)) || !val)
return 1;
val = 0;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
&val, sizeof(val)))
return 1;
if (bpf_getsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
&val, sizeof(val)) || val)
return 1;

return 0;
}

static __inline int misc_opts(struct bpf_sock_addr *ctx, int opt)
{
int old, tmp, new = 0xeb9f;
@@ -127,6 +148,10 @@ int bind_v4_prog(struct bpf_sock_addr *ctx)
if (misc_opts(ctx, SO_MARK) || misc_opts(ctx, SO_PRIORITY))
return 0;

/* Set reuseport and unset */
if (bind_reuseport(ctx))
return 0;

ctx->user_ip4 = bpf_htonl(SERV4_REWRITE_IP);
ctx->user_port = bpf_htons(SERV4_REWRITE_PORT);

@@ -63,6 +63,27 @@ static __inline int bind_to_device(struct bpf_sock_addr *ctx)
return 0;
}

static __inline int bind_reuseport(struct bpf_sock_addr *ctx)
{
int val = 1;

if (bpf_setsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
&val, sizeof(val)))
return 1;
if (bpf_getsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
&val, sizeof(val)) || !val)
return 1;
val = 0;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
&val, sizeof(val)))
return 1;
if (bpf_getsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
&val, sizeof(val)) || val)
return 1;

return 0;
}

static __inline int misc_opts(struct bpf_sock_addr *ctx, int opt)
{
int old, tmp, new = 0xeb9f;
@@ -141,6 +162,10 @@ int bind_v6_prog(struct bpf_sock_addr *ctx)
if (misc_opts(ctx, SO_MARK) || misc_opts(ctx, SO_PRIORITY))
return 0;

/* Set reuseport and unset */
if (bind_reuseport(ctx))
return 0;

ctx->user_ip6[0] = bpf_htonl(SERV6_REWRITE_IP_0);
ctx->user_ip6[1] = bpf_htonl(SERV6_REWRITE_IP_1);
ctx->user_ip6[2] = bpf_htonl(SERV6_REWRITE_IP_2);
@@ -64,7 +64,7 @@ __u64 test7_result = 0;
SEC("fentry/bpf_fentry_test7")
int BPF_PROG(test7, struct bpf_fentry_test_t *arg)
{
if (arg == 0)
if (!arg)
test7_result = 1;
return 0;
}

@@ -65,7 +65,7 @@ __u64 test7_result = 0;
SEC("fexit/bpf_fentry_test7")
int BPF_PROG(test7, struct bpf_fentry_test_t *arg)
{
if (arg == 0)
if (!arg)
test7_result = 1;
return 0;
}
@@ -74,7 +74,7 @@ __u64 test8_result = 0;
SEC("fexit/bpf_fentry_test8")
int BPF_PROG(test8, struct bpf_fentry_test_t *arg)
{
if (arg->a == 0)
if (!arg->a)
test8_result = 1;
return 0;
}
@@ -4,7 +4,6 @@
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>

#define NULL 0
#define INLINE __always_inline

#define skb_shorter(skb, len) ((void *)(long)(skb)->data + (len) > (void *)(long)skb->data_end)

@@ -14,7 +14,7 @@ struct Big {

__noinline int foo(const struct Big *big)
{
if (big == 0)
if (!big)
return 0;

return bpf_get_prandom_u32() < big->y;
tools/testing/selftests/bpf/progs/test_static_linked1.c (new file, 30 lines)
@@ -0,0 +1,30 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* 8-byte aligned .bss */
static volatile long static_var1;
static volatile int static_var11;
int var1 = 0;
/* 4-byte aligned .rodata */
const volatile int rovar1;

/* same "subprog" name in both files */
static __noinline int subprog(int x)
{
/* but different formula */
return x * 2;
}

SEC("raw_tp/sys_enter")
int handler1(const void *ctx)
{
var1 = subprog(rovar1) + static_var1 + static_var11;

return 0;
}

char LICENSE[] SEC("license") = "GPL";
int VERSION SEC("version") = 1;
tools/testing/selftests/bpf/progs/test_static_linked2.c (new file, 31 lines)
@@ -0,0 +1,31 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* 4-byte aligned .bss */
static volatile int static_var2;
static volatile int static_var22;
int var2 = 0;
/* 8-byte aligned .rodata */
const volatile long rovar2;

/* same "subprog" name in both files */
static __noinline int subprog(int x)
{
/* but different formula */
return x * 3;
}

SEC("raw_tp/sys_enter")
int handler2(const void *ctx)
{
var2 = subprog(rovar2) + static_var2 + static_var22;

return 0;
}

/* different name and/or type of the variable doesn't matter */
char _license[] SEC("license") = "GPL";
int _version SEC("version") = 1;
@@ -297,7 +297,7 @@ static void xsk_configure_umem(struct ifobject *data, void *buffer, u64 size)
static void xsk_populate_fill_ring(struct xsk_umem_info *umem)
{
int ret, i;
u32 idx;
u32 idx = 0;

ret = xsk_ring_prod__reserve(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS, &idx);
if (ret != XSK_RING_PROD__DEFAULT_NUM_DESCS)
@@ -584,7 +584,7 @@ static void rx_pkt(struct xsk_socket_info *xsk, struct pollfd *fds)

static void tx_only(struct xsk_socket_info *xsk, u32 *frameptr, int batch_size)
{
u32 idx;
u32 idx = 0;
unsigned int i;
bool tx_invalid_test = stat_test_type == STAT_TEST_TX_INVALID;
u32 len = tx_invalid_test ? XSK_UMEM__DEFAULT_FRAME_SIZE + 1 : PKT_SIZE;