forked from Minki/linux
91c960b005
A subsequent patch will add additional atomic operations. These new operations will use the same opcode field as the existing XADD, with the immediate discriminating different operations. In preparation, rename the instruction mode BPF_ATOMIC and start calling the zero immediate BPF_ADD. This is possible (doesn't break existing valid BPF progs) because the immediate field is currently reserved MBZ and BPF_ADD is zero. All uses are removed from the tree but the BPF_XADD definition is kept around to avoid breaking builds for people including kernel headers. Signed-off-by: Brendan Jackman <jackmanb@google.com> Signed-off-by: Alexei Starovoitov <ast@kernel.org> Acked-by: Björn Töpel <bjorn.topel@gmail.com> Link: https://lore.kernel.org/bpf/20210114181751.768687-5-jackmanb@google.com
107 lines
2.9 KiB
C
107 lines
2.9 KiB
C
/* eBPF example program:
|
|
* - creates arraymap in kernel with key 4 bytes and value 8 bytes
|
|
*
|
|
* - loads eBPF program:
|
|
* r0 = skb->data[ETH_HLEN + offsetof(struct iphdr, protocol)];
|
|
* *(u32*)(fp - 4) = r0;
|
|
* // assuming packet is IPv4, lookup ip->proto in a map
|
|
* value = bpf_map_lookup_elem(map_fd, fp - 4);
|
|
* if (value)
|
|
* (*(u64*)value) += 1;
|
|
*
|
|
* - attaches this program to loopback interface "lo" raw socket
|
|
*
|
|
* - every second user space reads map[tcp], map[udp], map[icmp] to see
|
|
* how many packets of given protocol were seen on "lo"
|
|
*/
|
|
#include <stdio.h>
|
|
#include <unistd.h>
|
|
#include <assert.h>
|
|
#include <linux/bpf.h>
|
|
#include <string.h>
|
|
#include <stdlib.h>
|
|
#include <errno.h>
|
|
#include <sys/socket.h>
|
|
#include <arpa/inet.h>
|
|
#include <linux/if_ether.h>
|
|
#include <linux/ip.h>
|
|
#include <stddef.h>
|
|
#include <bpf/bpf.h>
|
|
#include "bpf_insn.h"
|
|
#include "sock_example.h"
|
|
|
|
/* Buffer that receives the kernel verifier log from bpf_load_program();
 * printed diagnostics land here when program loading fails.
 */
char bpf_log_buf[BPF_LOG_BUF_SIZE];
|
|
|
|
static int test_sock(void)
|
|
{
|
|
int sock = -1, map_fd, prog_fd, i, key;
|
|
long long value = 0, tcp_cnt, udp_cnt, icmp_cnt;
|
|
|
|
map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(key), sizeof(value),
|
|
256, 0);
|
|
if (map_fd < 0) {
|
|
printf("failed to create map '%s'\n", strerror(errno));
|
|
goto cleanup;
|
|
}
|
|
|
|
struct bpf_insn prog[] = {
|
|
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
|
|
BPF_LD_ABS(BPF_B, ETH_HLEN + offsetof(struct iphdr, protocol) /* R0 = ip->proto */),
|
|
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
|
|
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
|
|
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
|
|
BPF_LD_MAP_FD(BPF_REG_1, map_fd),
|
|
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
|
|
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
|
|
BPF_MOV64_IMM(BPF_REG_1, 1), /* r1 = 1 */
|
|
BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
|
|
BPF_MOV64_IMM(BPF_REG_0, 0), /* r0 = 0 */
|
|
BPF_EXIT_INSN(),
|
|
};
|
|
size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
|
|
|
|
prog_fd = bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog, insns_cnt,
|
|
"GPL", 0, bpf_log_buf, BPF_LOG_BUF_SIZE);
|
|
if (prog_fd < 0) {
|
|
printf("failed to load prog '%s'\n", strerror(errno));
|
|
goto cleanup;
|
|
}
|
|
|
|
sock = open_raw_sock("lo");
|
|
|
|
if (setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &prog_fd,
|
|
sizeof(prog_fd)) < 0) {
|
|
printf("setsockopt %s\n", strerror(errno));
|
|
goto cleanup;
|
|
}
|
|
|
|
for (i = 0; i < 10; i++) {
|
|
key = IPPROTO_TCP;
|
|
assert(bpf_map_lookup_elem(map_fd, &key, &tcp_cnt) == 0);
|
|
|
|
key = IPPROTO_UDP;
|
|
assert(bpf_map_lookup_elem(map_fd, &key, &udp_cnt) == 0);
|
|
|
|
key = IPPROTO_ICMP;
|
|
assert(bpf_map_lookup_elem(map_fd, &key, &icmp_cnt) == 0);
|
|
|
|
printf("TCP %lld UDP %lld ICMP %lld packets\n",
|
|
tcp_cnt, udp_cnt, icmp_cnt);
|
|
sleep(1);
|
|
}
|
|
|
|
cleanup:
|
|
/* maps, programs, raw sockets will auto cleanup on process exit */
|
|
return 0;
|
|
}
|
|
|
|
/*
 * Kick off some loopback ICMP traffic with ping(1) so the counters in
 * test_sock() have something to count, then run the test.
 *
 * Fixes over the original: the popen() result was never checked for NULL
 * and never pclose()d, leaking the pipe fd and leaving a zombie child.
 */
int main(void)
{
	FILE *f;
	int ret;

	f = popen("ping -4 -c5 localhost", "r");
	if (!f)
		printf("popen failed '%s'\n", strerror(errno));

	ret = test_sock();

	/* Reap the ping child and release the pipe; pclose() waits for the
	 * command, which has long finished by the end of the 10s poll loop.
	 */
	if (f)
		pclose(f);

	return ret;
}
|