Merge remote-tracking branch 'torvalds/master' into perf/core

To pick up fixes.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Arnaldo Carvalho de Melo
2022-01-10 15:35:41 -03:00
1013 changed files with 64579 additions and 12643 deletions

View File

@@ -3925,6 +3925,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
bool draining = false;
trace->live = true;
signal(SIGCHLD, sig_handler);
if (!trace->raw_augmented_syscalls) {
if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
@@ -4876,7 +4877,6 @@ int cmd_trace(int argc, const char **argv)
signal(SIGSEGV, sighandler_dump_stack);
signal(SIGFPE, sighandler_dump_stack);
signal(SIGCHLD, sig_handler);
signal(SIGINT, sig_handler);
trace.evlist = evlist__new();

View File

@@ -24,16 +24,6 @@
#include "util/parse-sublevel-options.h"
#include <linux/ctype.h>
#include <traceevent/event-parse.h>
#define MAKE_LIBTRACEEVENT_VERSION(a, b, c) ((a)*255*255+(b)*255+(c))
#ifndef LIBTRACEEVENT_VERSION
/*
* If LIBTRACEEVENT_VERSION wasn't computed then set to version 1.1.0 that ships
* with the Linux kernel tools.
*/
#define LIBTRACEEVENT_VERSION MAKE_LIBTRACEEVENT_VERSION(1, 1, 0)
#endif
int verbose;
int debug_peo_args;
@@ -238,15 +228,6 @@ int perf_debug_option(const char *str)
/* Allow only verbose value in range (0, 10), otherwise set 0. */
verbose = (verbose < 0) || (verbose > 10) ? 0 : verbose;
#if MAKE_LIBTRACEEVENT_VERSION(1, 3, 0) <= LIBTRACEEVENT_VERSION
if (verbose == 1)
tep_set_loglevel(TEP_LOG_INFO);
else if (verbose == 2)
tep_set_loglevel(TEP_LOG_DEBUG);
else if (verbose >= 3)
tep_set_loglevel(TEP_LOG_ALL);
#endif
return 0;
}
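
The version guard above packs (major, minor, patch) into a single integer so that releases can be compared numerically at preprocessing time. A minimal standalone sketch of the same scheme (illustrative only, reusing the macro exactly as defined above):

#include <stdio.h>

#define MAKE_LIBTRACEEVENT_VERSION(a, b, c) ((a)*255*255+(b)*255+(c))

int main(void)
{
	/* 1.3.0 encodes to a larger value than 1.1.0, which is why the
	 * #if above enables tep_set_loglevel() only on 1.3.0 or newer. */
	printf("1.1.0 -> %d\n", MAKE_LIBTRACEEVENT_VERSION(1, 1, 0));
	printf("1.3.0 -> %d\n", MAKE_LIBTRACEEVENT_VERSION(1, 3, 0));
	printf("1.3.0 >= 1.1.0: %d\n",
	       MAKE_LIBTRACEEVENT_VERSION(1, 3, 0) >=
	       MAKE_LIBTRACEEVENT_VERSION(1, 1, 0));
	return 0;
}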

View File

@@ -4,7 +4,7 @@
ARCH ?= $(shell uname -m 2>/dev/null || echo not)
ifneq (,$(filter $(ARCH),aarch64 arm64))
ARM64_SUBTARGETS ?= tags signal pauth fp mte bti
ARM64_SUBTARGETS ?= tags signal pauth fp mte bti abi
else
ARM64_SUBTARGETS :=
endif

View File

@@ -0,0 +1 @@
syscall-abi

View File

@@ -0,0 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright (C) 2021 ARM Limited
TEST_GEN_PROGS := syscall-abi
include ../../lib.mk
$(OUTPUT)/syscall-abi: syscall-abi.c syscall-abi-asm.S

View File

@@ -0,0 +1,240 @@
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2021 ARM Limited.
//
// Assembly portion of the syscall ABI test
//
// Load values from memory into registers, invoke a syscall and save the
// register values back to memory for later checking. The syscall to be
// invoked is configured in x8 of the input GPR data.
//
// x0: SVE VL, 0 for FP only
//
// GPRs: gpr_in, gpr_out
// FPRs: fpr_in, fpr_out
// Zn: z_in, z_out
// Pn: p_in, p_out
// FFR: ffr_in, ffr_out
.arch_extension sve
.globl do_syscall
do_syscall:
// Store callee saved registers x19-x29 (80 bytes) plus x0 and x1
stp x29, x30, [sp, #-112]!
mov x29, sp
stp x0, x1, [sp, #16]
stp x19, x20, [sp, #32]
stp x21, x22, [sp, #48]
stp x23, x24, [sp, #64]
stp x25, x26, [sp, #80]
stp x27, x28, [sp, #96]
// Load GPRs x8-x28, and save our FP/LR for later comparison
ldr x2, =gpr_in
add x2, x2, #64
ldp x8, x9, [x2], #16
ldp x10, x11, [x2], #16
ldp x12, x13, [x2], #16
ldp x14, x15, [x2], #16
ldp x16, x17, [x2], #16
ldp x18, x19, [x2], #16
ldp x20, x21, [x2], #16
ldp x22, x23, [x2], #16
ldp x24, x25, [x2], #16
ldp x26, x27, [x2], #16
ldr x28, [x2], #8
str x29, [x2], #8 // FP
str x30, [x2], #8 // LR
// Load FPRs if we're not doing SVE
cbnz x0, 1f
ldr x2, =fpr_in
ldp q0, q1, [x2]
ldp q2, q3, [x2, #16 * 2]
ldp q4, q5, [x2, #16 * 4]
ldp q6, q7, [x2, #16 * 6]
ldp q8, q9, [x2, #16 * 8]
ldp q10, q11, [x2, #16 * 10]
ldp q12, q13, [x2, #16 * 12]
ldp q14, q15, [x2, #16 * 14]
ldp q16, q17, [x2, #16 * 16]
ldp q18, q19, [x2, #16 * 18]
ldp q20, q21, [x2, #16 * 20]
ldp q22, q23, [x2, #16 * 22]
ldp q24, q25, [x2, #16 * 24]
ldp q26, q27, [x2, #16 * 26]
ldp q28, q29, [x2, #16 * 28]
ldp q30, q31, [x2, #16 * 30]
1:
// Load the SVE registers if we're doing SVE
cbz x0, 1f
ldr x2, =z_in
ldr z0, [x2, #0, MUL VL]
ldr z1, [x2, #1, MUL VL]
ldr z2, [x2, #2, MUL VL]
ldr z3, [x2, #3, MUL VL]
ldr z4, [x2, #4, MUL VL]
ldr z5, [x2, #5, MUL VL]
ldr z6, [x2, #6, MUL VL]
ldr z7, [x2, #7, MUL VL]
ldr z8, [x2, #8, MUL VL]
ldr z9, [x2, #9, MUL VL]
ldr z10, [x2, #10, MUL VL]
ldr z11, [x2, #11, MUL VL]
ldr z12, [x2, #12, MUL VL]
ldr z13, [x2, #13, MUL VL]
ldr z14, [x2, #14, MUL VL]
ldr z15, [x2, #15, MUL VL]
ldr z16, [x2, #16, MUL VL]
ldr z17, [x2, #17, MUL VL]
ldr z18, [x2, #18, MUL VL]
ldr z19, [x2, #19, MUL VL]
ldr z20, [x2, #20, MUL VL]
ldr z21, [x2, #21, MUL VL]
ldr z22, [x2, #22, MUL VL]
ldr z23, [x2, #23, MUL VL]
ldr z24, [x2, #24, MUL VL]
ldr z25, [x2, #25, MUL VL]
ldr z26, [x2, #26, MUL VL]
ldr z27, [x2, #27, MUL VL]
ldr z28, [x2, #28, MUL VL]
ldr z29, [x2, #29, MUL VL]
ldr z30, [x2, #30, MUL VL]
ldr z31, [x2, #31, MUL VL]
ldr x2, =ffr_in
ldr p0, [x2, #0]
wrffr p0.b
ldr x2, =p_in
ldr p0, [x2, #0, MUL VL]
ldr p1, [x2, #1, MUL VL]
ldr p2, [x2, #2, MUL VL]
ldr p3, [x2, #3, MUL VL]
ldr p4, [x2, #4, MUL VL]
ldr p5, [x2, #5, MUL VL]
ldr p6, [x2, #6, MUL VL]
ldr p7, [x2, #7, MUL VL]
ldr p8, [x2, #8, MUL VL]
ldr p9, [x2, #9, MUL VL]
ldr p10, [x2, #10, MUL VL]
ldr p11, [x2, #11, MUL VL]
ldr p12, [x2, #12, MUL VL]
ldr p13, [x2, #13, MUL VL]
ldr p14, [x2, #14, MUL VL]
ldr p15, [x2, #15, MUL VL]
1:
// Do the syscall
svc #0
// Save GPRs x8-x30
ldr x2, =gpr_out
add x2, x2, #64
stp x8, x9, [x2], #16
stp x10, x11, [x2], #16
stp x12, x13, [x2], #16
stp x14, x15, [x2], #16
stp x16, x17, [x2], #16
stp x18, x19, [x2], #16
stp x20, x21, [x2], #16
stp x22, x23, [x2], #16
stp x24, x25, [x2], #16
stp x26, x27, [x2], #16
stp x28, x29, [x2], #16
str x30, [x2]
// Restore x0 and x1 for feature checks
ldp x0, x1, [sp, #16]
// Save FPSIMD state
ldr x2, =fpr_out
stp q0, q1, [x2]
stp q2, q3, [x2, #16 * 2]
stp q4, q5, [x2, #16 * 4]
stp q6, q7, [x2, #16 * 6]
stp q8, q9, [x2, #16 * 8]
stp q10, q11, [x2, #16 * 10]
stp q12, q13, [x2, #16 * 12]
stp q14, q15, [x2, #16 * 14]
stp q16, q17, [x2, #16 * 16]
stp q18, q19, [x2, #16 * 18]
stp q20, q21, [x2, #16 * 20]
stp q22, q23, [x2, #16 * 22]
stp q24, q25, [x2, #16 * 24]
stp q26, q27, [x2, #16 * 26]
stp q28, q29, [x2, #16 * 28]
stp q30, q31, [x2, #16 * 30]
// Save the SVE state if we have some
cbz x0, 1f
ldr x2, =z_out
str z0, [x2, #0, MUL VL]
str z1, [x2, #1, MUL VL]
str z2, [x2, #2, MUL VL]
str z3, [x2, #3, MUL VL]
str z4, [x2, #4, MUL VL]
str z5, [x2, #5, MUL VL]
str z6, [x2, #6, MUL VL]
str z7, [x2, #7, MUL VL]
str z8, [x2, #8, MUL VL]
str z9, [x2, #9, MUL VL]
str z10, [x2, #10, MUL VL]
str z11, [x2, #11, MUL VL]
str z12, [x2, #12, MUL VL]
str z13, [x2, #13, MUL VL]
str z14, [x2, #14, MUL VL]
str z15, [x2, #15, MUL VL]
str z16, [x2, #16, MUL VL]
str z17, [x2, #17, MUL VL]
str z18, [x2, #18, MUL VL]
str z19, [x2, #19, MUL VL]
str z20, [x2, #20, MUL VL]
str z21, [x2, #21, MUL VL]
str z22, [x2, #22, MUL VL]
str z23, [x2, #23, MUL VL]
str z24, [x2, #24, MUL VL]
str z25, [x2, #25, MUL VL]
str z26, [x2, #26, MUL VL]
str z27, [x2, #27, MUL VL]
str z28, [x2, #28, MUL VL]
str z29, [x2, #29, MUL VL]
str z30, [x2, #30, MUL VL]
str z31, [x2, #31, MUL VL]
ldr x2, =p_out
str p0, [x2, #0, MUL VL]
str p1, [x2, #1, MUL VL]
str p2, [x2, #2, MUL VL]
str p3, [x2, #3, MUL VL]
str p4, [x2, #4, MUL VL]
str p5, [x2, #5, MUL VL]
str p6, [x2, #6, MUL VL]
str p7, [x2, #7, MUL VL]
str p8, [x2, #8, MUL VL]
str p9, [x2, #9, MUL VL]
str p10, [x2, #10, MUL VL]
str p11, [x2, #11, MUL VL]
str p12, [x2, #12, MUL VL]
str p13, [x2, #13, MUL VL]
str p14, [x2, #14, MUL VL]
str p15, [x2, #15, MUL VL]
ldr x2, =ffr_out
rdffr p0.b
str p0, [x2, #0]
1:
// Restore callee saved registers x19-x30
ldp x19, x20, [sp, #32]
ldp x21, x22, [sp, #48]
ldp x23, x24, [sp, #64]
ldp x25, x26, [sp, #80]
ldp x27, x28, [sp, #96]
ldp x29, x30, [sp], #112
ret

View File

@@ -0,0 +1,318 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021 ARM Limited.
*/
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/auxv.h>
#include <sys/prctl.h>
#include <asm/hwcap.h>
#include <asm/sigcontext.h>
#include <asm/unistd.h>
#include "../../kselftest.h"
#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
#define NUM_VL ((SVE_VQ_MAX - SVE_VQ_MIN) + 1)
extern void do_syscall(int sve_vl);
static void fill_random(void *buf, size_t size)
{
int i;
uint32_t *lbuf = buf;
/* random() returns a 32 bit number regardless of the size of long */
for (i = 0; i < size / sizeof(uint32_t); i++)
lbuf[i] = random();
}
/*
* We also repeat the test for several syscalls to try to expose different
* behaviour.
*/
static struct syscall_cfg {
int syscall_nr;
const char *name;
} syscalls[] = {
{ __NR_getpid, "getpid()" },
{ __NR_sched_yield, "sched_yield()" },
};
#define NUM_GPR 31
uint64_t gpr_in[NUM_GPR];
uint64_t gpr_out[NUM_GPR];
static void setup_gpr(struct syscall_cfg *cfg, int sve_vl)
{
fill_random(gpr_in, sizeof(gpr_in));
gpr_in[8] = cfg->syscall_nr;
memset(gpr_out, 0, sizeof(gpr_out));
}
static int check_gpr(struct syscall_cfg *cfg, int sve_vl)
{
int errors = 0;
int i;
/*
* GPR x0-x7 may be clobbered, and all others should be preserved.
*/
for (i = 9; i < ARRAY_SIZE(gpr_in); i++) {
if (gpr_in[i] != gpr_out[i]) {
ksft_print_msg("%s SVE VL %d mismatch in GPR %d: %llx != %llx\n",
cfg->name, sve_vl, i,
gpr_in[i], gpr_out[i]);
errors++;
}
}
return errors;
}
#define NUM_FPR 32
uint64_t fpr_in[NUM_FPR * 2];
uint64_t fpr_out[NUM_FPR * 2];
static void setup_fpr(struct syscall_cfg *cfg, int sve_vl)
{
fill_random(fpr_in, sizeof(fpr_in));
memset(fpr_out, 0, sizeof(fpr_out));
}
static int check_fpr(struct syscall_cfg *cfg, int sve_vl)
{
int errors = 0;
int i;
if (!sve_vl) {
for (i = 0; i < ARRAY_SIZE(fpr_in); i++) {
if (fpr_in[i] != fpr_out[i]) {
ksft_print_msg("%s Q%d/%d mismatch %llx != %llx\n",
cfg->name,
i / 2, i % 2,
fpr_in[i], fpr_out[i]);
errors++;
}
}
}
return errors;
}
static uint8_t z_zero[__SVE_ZREG_SIZE(SVE_VQ_MAX)];
uint8_t z_in[SVE_NUM_PREGS * __SVE_ZREG_SIZE(SVE_VQ_MAX)];
uint8_t z_out[SVE_NUM_PREGS * __SVE_ZREG_SIZE(SVE_VQ_MAX)];
static void setup_z(struct syscall_cfg *cfg, int sve_vl)
{
fill_random(z_in, sizeof(z_in));
fill_random(z_out, sizeof(z_out));
}
static int check_z(struct syscall_cfg *cfg, int sve_vl)
{
size_t reg_size = sve_vl;
int errors = 0;
int i;
if (!sve_vl)
return 0;
/*
* After a syscall the low 128 bits of the Z registers should
* be preserved and the rest be zeroed or preserved.
*/
for (i = 0; i < SVE_NUM_ZREGS; i++) {
void *in = &z_in[reg_size * i];
void *out = &z_out[reg_size * i];
if (memcmp(in, out, SVE_VQ_BYTES) != 0) {
ksft_print_msg("%s SVE VL %d Z%d low 128 bits changed\n",
cfg->name, sve_vl, i);
errors++;
}
}
return errors;
}
uint8_t p_in[SVE_NUM_PREGS * __SVE_PREG_SIZE(SVE_VQ_MAX)];
uint8_t p_out[SVE_NUM_PREGS * __SVE_PREG_SIZE(SVE_VQ_MAX)];
static void setup_p(struct syscall_cfg *cfg, int sve_vl)
{
fill_random(p_in, sizeof(p_in));
fill_random(p_out, sizeof(p_out));
}
static int check_p(struct syscall_cfg *cfg, int sve_vl)
{
size_t reg_size = sve_vq_from_vl(sve_vl) * 2; /* 1 bit per VL byte */
int errors = 0;
int i;
if (!sve_vl)
return 0;
/* After a syscall the P registers should be preserved or zeroed */
for (i = 0; i < SVE_NUM_PREGS * reg_size; i++)
if (p_out[i] && (p_in[i] != p_out[i]))
errors++;
if (errors)
ksft_print_msg("%s SVE VL %d predicate registers non-zero\n",
cfg->name, sve_vl);
return errors;
}
uint8_t ffr_in[__SVE_PREG_SIZE(SVE_VQ_MAX)];
uint8_t ffr_out[__SVE_PREG_SIZE(SVE_VQ_MAX)];
static void setup_ffr(struct syscall_cfg *cfg, int sve_vl)
{
/*
* It is only valid to set a contiguous set of bits starting
* at 0. For now since we're expecting this to be cleared by
* a syscall just set all bits.
*/
memset(ffr_in, 0xff, sizeof(ffr_in));
fill_random(ffr_out, sizeof(ffr_out));
}
static int check_ffr(struct syscall_cfg *cfg, int sve_vl)
{
size_t reg_size = sve_vq_from_vl(sve_vl) * 2; /* 1 bit per VL byte */
int errors = 0;
int i;
if (!sve_vl)
return 0;
/* After a syscall the FFR should be preserved or zeroed */
for (i = 0; i < reg_size; i++)
if (ffr_out[i] && (ffr_in[i] != ffr_out[i]))
errors++;
if (errors)
ksft_print_msg("%s SVE VL %d FFR non-zero\n",
cfg->name, sve_vl);
return errors;
}
typedef void (*setup_fn)(struct syscall_cfg *cfg, int sve_vl);
typedef int (*check_fn)(struct syscall_cfg *cfg, int sve_vl);
/*
* Each set of registers has a setup function which is called before
* the syscall to fill values in a global variable for loading by the
* test code and a check function which validates that the results are
* as expected. Vector lengths are passed everywhere, a vector length
* of 0 should be treated as do not test.
*/
static struct {
setup_fn setup;
check_fn check;
} regset[] = {
{ setup_gpr, check_gpr },
{ setup_fpr, check_fpr },
{ setup_z, check_z },
{ setup_p, check_p },
{ setup_ffr, check_ffr },
};
static bool do_test(struct syscall_cfg *cfg, int sve_vl)
{
int errors = 0;
int i;
for (i = 0; i < ARRAY_SIZE(regset); i++)
regset[i].setup(cfg, sve_vl);
do_syscall(sve_vl);
for (i = 0; i < ARRAY_SIZE(regset); i++)
errors += regset[i].check(cfg, sve_vl);
return errors == 0;
}
static void test_one_syscall(struct syscall_cfg *cfg)
{
int sve_vq, sve_vl;
/* FPSIMD only case */
ksft_test_result(do_test(cfg, 0),
"%s FPSIMD\n", cfg->name);
if (!(getauxval(AT_HWCAP) & HWCAP_SVE))
return;
for (sve_vq = SVE_VQ_MAX; sve_vq > 0; --sve_vq) {
sve_vl = prctl(PR_SVE_SET_VL, sve_vq * 16);
if (sve_vl == -1)
ksft_exit_fail_msg("PR_SVE_SET_VL failed: %s (%d)\n",
strerror(errno), errno);
sve_vl &= PR_SVE_VL_LEN_MASK;
if (sve_vq != sve_vq_from_vl(sve_vl))
sve_vq = sve_vq_from_vl(sve_vl);
ksft_test_result(do_test(cfg, sve_vl),
"%s SVE VL %d\n", cfg->name, sve_vl);
}
}
int sve_count_vls(void)
{
unsigned int vq;
int vl_count = 0;
int vl;
if (!(getauxval(AT_HWCAP) & HWCAP_SVE))
return 0;
/*
* Enumerate up to SVE_VQ_MAX vector lengths
*/
for (vq = SVE_VQ_MAX; vq > 0; --vq) {
vl = prctl(PR_SVE_SET_VL, vq * 16);
if (vl == -1)
ksft_exit_fail_msg("PR_SVE_SET_VL failed: %s (%d)\n",
strerror(errno), errno);
vl &= PR_SVE_VL_LEN_MASK;
if (vq != sve_vq_from_vl(vl))
vq = sve_vq_from_vl(vl);
vl_count++;
}
return vl_count;
}
int main(void)
{
int i;
srandom(getpid());
ksft_print_header();
ksft_set_plan(ARRAY_SIZE(syscalls) * (sve_count_vls() + 1));
for (i = 0; i < ARRAY_SIZE(syscalls); i++)
test_one_syscall(&syscalls[i]);
ksft_print_cnts();
return 0;
}
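
The register sizing used by the checks above follows directly from the SVE vector length: a Z register holds VL bytes, while a P register and the FFR hold one bit per Z-register byte, i.e. VL/8 bytes, which the code writes as sve_vq_from_vl(vl) * 2 because a vector quadword (VQ) is 16 bytes. A small sketch of that arithmetic (standalone, with sve_vq_from_vl() mirroring the uapi definition from <asm/sigcontext.h>):

#include <stdio.h>

#define SVE_VQ_BYTES		16	/* one vector quadword = 128 bits */
#define sve_vq_from_vl(vl)	((vl) / SVE_VQ_BYTES)

int main(void)
{
	int vl;

	/* For each power-of-two VL, a Z register holds VL bytes and a
	 * P register (or the FFR) holds one bit per Z-register byte. */
	for (vl = 16; vl <= 256; vl *= 2)
		printf("VL %3d: Z reg %3d bytes, P/FFR reg %2d bytes\n",
		       vl, vl, sve_vq_from_vl(vl) * 2);
	return 0;
}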

View File

@@ -1,3 +1,4 @@
fp-pidbench
fpsimd-test
rdvl-sve
sve-probe-vls

View File

@@ -2,13 +2,15 @@
CFLAGS += -I../../../../../usr/include/
TEST_GEN_PROGS := sve-ptrace sve-probe-vls vec-syscfg
TEST_PROGS_EXTENDED := fpsimd-test fpsimd-stress \
TEST_PROGS_EXTENDED := fp-pidbench fpsimd-test fpsimd-stress \
rdvl-sve \
sve-test sve-stress \
vlset
all: $(TEST_GEN_PROGS) $(TEST_PROGS_EXTENDED)
fp-pidbench: fp-pidbench.S asm-utils.o
$(CC) -nostdlib $^ -o $@
fpsimd-test: fpsimd-test.o asm-utils.o
$(CC) -nostdlib $^ -o $@
rdvl-sve: rdvl-sve.o rdvl.o

View File

@@ -0,0 +1,71 @@
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2021 ARM Limited.
// Original author: Mark Brown <broonie@kernel.org>
//
// Trivial syscall overhead benchmark.
//
// This is implemented in asm to ensure that we don't have any issues with
// system libraries using instructions that disrupt the test.
#include <asm/unistd.h>
#include "assembler.h"
.arch_extension sve
.macro test_loop per_loop
mov x10, x20
mov x8, #__NR_getpid
mrs x11, CNTVCT_EL0
1:
\per_loop
svc #0
sub x10, x10, #1
cbnz x10, 1b
mrs x12, CNTVCT_EL0
sub x0, x12, x11
bl putdec
puts "\n"
.endm
// Main program entry point
.globl _start
function _start
_start:
puts "Iterations per test: "
mov x20, #10000
lsl x20, x20, #8
mov x0, x20
bl putdec
puts "\n"
// Test having never used SVE
puts "No SVE: "
test_loop
// Check for SVE support - should use hwcap but that's hard in asm
mrs x0, ID_AA64PFR0_EL1
ubfx x0, x0, #32, #4
cbnz x0, 1f
puts "System does not support SVE\n"
b out
1:
// Execute a SVE instruction
puts "SVE VL: "
rdvl x0, #8
bl putdec
puts "\n"
puts "SVE used once: "
test_loop
// Use SVE per syscall
puts "SVE used per syscall: "
test_loop "rdvl x0, #8"
// And we're done
out:
mov x0, #0
mov x8, #__NR_exit
svc #0
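
fp-pidbench prints raw generic-timer ticks (CNTVCT_EL0 deltas) rather than time. To convert a tick count into wall-clock time, divide by the counter frequency advertised in CNTFRQ_EL0. A hedged userspace sketch (assumes an aarch64 toolchain; Linux exposes both registers to EL0, and the workload marker is a placeholder):

#include <stdint.h>
#include <stdio.h>

static inline uint64_t read_cntvct(void)
{
	uint64_t val;

	/* Virtual counter; EL0 access is enabled by the Linux kernel */
	asm volatile("mrs %0, cntvct_el0" : "=r" (val));
	return val;
}

static inline uint64_t read_cntfrq(void)
{
	uint64_t val;

	/* Counter frequency in Hz */
	asm volatile("mrs %0, cntfrq_el0" : "=r" (val));
	return val;
}

int main(void)
{
	uint64_t start = read_cntvct();

	/* ... workload under test goes here ... */

	uint64_t ticks = read_cntvct() - start;

	printf("%.1f ns elapsed\n", ticks * 1e9 / read_cntfrq());
	return 0;
}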

View File

@@ -21,16 +21,37 @@
#include "../../kselftest.h"
#define VL_TESTS (((SVE_VQ_MAX - SVE_VQ_MIN) + 1) * 3)
#define FPSIMD_TESTS 5
#define EXPECTED_TESTS (VL_TESTS + FPSIMD_TESTS)
#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
/* <linux/elf.h> and <sys/auxv.h> don't like each other, so: */
#ifndef NT_ARM_SVE
#define NT_ARM_SVE 0x405
#endif
struct vec_type {
const char *name;
unsigned long hwcap_type;
unsigned long hwcap;
int regset;
int prctl_set;
};
static const struct vec_type vec_types[] = {
{
.name = "SVE",
.hwcap_type = AT_HWCAP,
.hwcap = HWCAP_SVE,
.regset = NT_ARM_SVE,
.prctl_set = PR_SVE_SET_VL,
},
};
#define VL_TESTS (((SVE_VQ_MAX - SVE_VQ_MIN) + 1) * 3)
#define FLAG_TESTS 2
#define FPSIMD_TESTS 3
#define EXPECTED_TESTS ((VL_TESTS + FLAG_TESTS + FPSIMD_TESTS) * ARRAY_SIZE(vec_types))
static void fill_buf(char *buf, size_t size)
{
int i;
@@ -59,7 +80,8 @@ static int get_fpsimd(pid_t pid, struct user_fpsimd_state *fpsimd)
return ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov);
}
static struct user_sve_header *get_sve(pid_t pid, void **buf, size_t *size)
static struct user_sve_header *get_sve(pid_t pid, const struct vec_type *type,
void **buf, size_t *size)
{
struct user_sve_header *sve;
void *p;
@@ -80,7 +102,7 @@ static struct user_sve_header *get_sve(pid_t pid, void **buf, size_t *size)
iov.iov_base = *buf;
iov.iov_len = sz;
if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov))
if (ptrace(PTRACE_GETREGSET, pid, type->regset, &iov))
goto error;
sve = *buf;
@@ -96,17 +118,18 @@ error:
return NULL;
}
static int set_sve(pid_t pid, const struct user_sve_header *sve)
static int set_sve(pid_t pid, const struct vec_type *type,
const struct user_sve_header *sve)
{
struct iovec iov;
iov.iov_base = (void *)sve;
iov.iov_len = sve->size;
return ptrace(PTRACE_SETREGSET, pid, NT_ARM_SVE, &iov);
return ptrace(PTRACE_SETREGSET, pid, type->regset, &iov);
}
/* Validate setting and getting the inherit flag */
static void ptrace_set_get_inherit(pid_t child)
static void ptrace_set_get_inherit(pid_t child, const struct vec_type *type)
{
struct user_sve_header sve;
struct user_sve_header *new_sve = NULL;
@@ -118,9 +141,10 @@ static void ptrace_set_get_inherit(pid_t child)
sve.size = sizeof(sve);
sve.vl = sve_vl_from_vq(SVE_VQ_MIN);
sve.flags = SVE_PT_VL_INHERIT;
ret = set_sve(child, &sve);
ret = set_sve(child, type, &sve);
if (ret != 0) {
ksft_test_result_fail("Failed to set SVE_PT_VL_INHERIT\n");
ksft_test_result_fail("Failed to set %s SVE_PT_VL_INHERIT\n",
type->name);
return;
}
@@ -128,35 +152,39 @@ static void ptrace_set_get_inherit(pid_t child)
* Read back the new register state and verify that we have
* set the flags we expected.
*/
if (!get_sve(child, (void **)&new_sve, &new_sve_size)) {
ksft_test_result_fail("Failed to read SVE flags\n");
if (!get_sve(child, type, (void **)&new_sve, &new_sve_size)) {
ksft_test_result_fail("Failed to read %s SVE flags\n",
type->name);
return;
}
ksft_test_result(new_sve->flags & SVE_PT_VL_INHERIT,
"SVE_PT_VL_INHERIT set\n");
"%s SVE_PT_VL_INHERIT set\n", type->name);
/* Now clear */
sve.flags &= ~SVE_PT_VL_INHERIT;
ret = set_sve(child, &sve);
ret = set_sve(child, type, &sve);
if (ret != 0) {
ksft_test_result_fail("Failed to clear SVE_PT_VL_INHERIT\n");
ksft_test_result_fail("Failed to clear %s SVE_PT_VL_INHERIT\n",
type->name);
return;
}
if (!get_sve(child, (void **)&new_sve, &new_sve_size)) {
ksft_test_result_fail("Failed to read SVE flags\n");
if (!get_sve(child, type, (void **)&new_sve, &new_sve_size)) {
ksft_test_result_fail("Failed to read %s SVE flags\n",
type->name);
return;
}
ksft_test_result(!(new_sve->flags & SVE_PT_VL_INHERIT),
"SVE_PT_VL_INHERIT cleared\n");
"%s SVE_PT_VL_INHERIT cleared\n", type->name);
free(new_sve);
}
/* Validate attempting to set the specified VL via ptrace */
static void ptrace_set_get_vl(pid_t child, unsigned int vl, bool *supported)
static void ptrace_set_get_vl(pid_t child, const struct vec_type *type,
unsigned int vl, bool *supported)
{
struct user_sve_header sve;
struct user_sve_header *new_sve = NULL;
@@ -166,10 +194,10 @@ static void ptrace_set_get_vl(pid_t child, unsigned int vl, bool *supported)
*supported = false;
/* Check if the VL is supported in this process */
prctl_vl = prctl(PR_SVE_SET_VL, vl);
prctl_vl = prctl(type->prctl_set, vl);
if (prctl_vl == -1)
ksft_exit_fail_msg("prctl(PR_SVE_SET_VL) failed: %s (%d)\n",
strerror(errno), errno);
ksft_exit_fail_msg("prctl(PR_%s_SET_VL) failed: %s (%d)\n",
type->name, strerror(errno), errno);
/* If the VL is not supported then a supported VL will be returned */
*supported = (prctl_vl == vl);
@@ -178,9 +206,10 @@ static void ptrace_set_get_vl(pid_t child, unsigned int vl, bool *supported)
memset(&sve, 0, sizeof(sve));
sve.size = sizeof(sve);
sve.vl = vl;
ret = set_sve(child, &sve);
ret = set_sve(child, type, &sve);
if (ret != 0) {
ksft_test_result_fail("Failed to set VL %u\n", vl);
ksft_test_result_fail("Failed to set %s VL %u\n",
type->name, vl);
return;
}
@@ -188,12 +217,14 @@ static void ptrace_set_get_vl(pid_t child, unsigned int vl, bool *supported)
* Read back the new register state and verify that we have the
* same VL that we got from prctl() on ourselves.
*/
if (!get_sve(child, (void **)&new_sve, &new_sve_size)) {
ksft_test_result_fail("Failed to read VL %u\n", vl);
if (!get_sve(child, type, (void **)&new_sve, &new_sve_size)) {
ksft_test_result_fail("Failed to read %s VL %u\n",
type->name, vl);
return;
}
ksft_test_result(new_sve->vl = prctl_vl, "Set VL %u\n", vl);
ksft_test_result(new_sve->vl = prctl_vl, "Set %s VL %u\n",
type->name, vl);
free(new_sve);
}
@@ -209,7 +240,7 @@ static void check_u32(unsigned int vl, const char *reg,
}
/* Access the FPSIMD registers via the SVE regset */
static void ptrace_sve_fpsimd(pid_t child)
static void ptrace_sve_fpsimd(pid_t child, const struct vec_type *type)
{
void *svebuf = NULL;
size_t svebufsz = 0;
@@ -219,17 +250,18 @@ static void ptrace_sve_fpsimd(pid_t child)
unsigned char *p;
/* New process should start with FPSIMD registers only */
sve = get_sve(child, &svebuf, &svebufsz);
sve = get_sve(child, type, &svebuf, &svebufsz);
if (!sve) {
ksft_test_result_fail("get_sve: %s\n", strerror(errno));
ksft_test_result_fail("get_sve(%s): %s\n",
type->name, strerror(errno));
return;
} else {
ksft_test_result_pass("get_sve(FPSIMD)\n");
ksft_test_result_pass("get_sve(%s FPSIMD)\n", type->name);
}
ksft_test_result((sve->flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD,
"Set FPSIMD registers\n");
"Set FPSIMD registers via %s\n", type->name);
if ((sve->flags & SVE_PT_REGS_MASK) != SVE_PT_REGS_FPSIMD)
goto out;
@@ -243,9 +275,9 @@ static void ptrace_sve_fpsimd(pid_t child)
p[j] = j;
}
if (set_sve(child, sve)) {
ksft_test_result_fail("set_sve(FPSIMD): %s\n",
strerror(errno));
if (set_sve(child, type, sve)) {
ksft_test_result_fail("set_sve(%s FPSIMD): %s\n",
type->name, strerror(errno));
goto out;
}
@@ -257,16 +289,20 @@ static void ptrace_sve_fpsimd(pid_t child)
goto out;
}
if (memcmp(fpsimd, &new_fpsimd, sizeof(*fpsimd)) == 0)
ksft_test_result_pass("get_fpsimd() gave same state\n");
ksft_test_result_pass("%s get_fpsimd() gave same state\n",
type->name);
else
ksft_test_result_fail("get_fpsimd() gave different state\n");
ksft_test_result_fail("%s get_fpsimd() gave different state\n",
type->name);
out:
free(svebuf);
}
/* Validate attempting to set SVE data and read SVE data */
static void ptrace_set_sve_get_sve_data(pid_t child, unsigned int vl)
static void ptrace_set_sve_get_sve_data(pid_t child,
const struct vec_type *type,
unsigned int vl)
{
void *write_buf;
void *read_buf = NULL;
@@ -281,8 +317,8 @@ static void ptrace_set_sve_get_sve_data(pid_t child, unsigned int vl)
data_size = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
write_buf = malloc(data_size);
if (!write_buf) {
ksft_test_result_fail("Error allocating %d byte buffer for VL %u\n",
data_size, vl);
ksft_test_result_fail("Error allocating %d byte buffer for %s VL %u\n",
data_size, type->name, vl);
return;
}
write_sve = write_buf;
@@ -306,23 +342,26 @@ static void ptrace_set_sve_get_sve_data(pid_t child, unsigned int vl)
/* TODO: Generate a valid FFR pattern */
ret = set_sve(child, write_sve);
ret = set_sve(child, type, write_sve);
if (ret != 0) {
ksft_test_result_fail("Failed to set VL %u data\n", vl);
ksft_test_result_fail("Failed to set %s VL %u data\n",
type->name, vl);
goto out;
}
/* Read the data back */
if (!get_sve(child, (void **)&read_buf, &read_sve_size)) {
ksft_test_result_fail("Failed to read VL %u data\n", vl);
if (!get_sve(child, type, (void **)&read_buf, &read_sve_size)) {
ksft_test_result_fail("Failed to read %s VL %u data\n",
type->name, vl);
goto out;
}
read_sve = read_buf;
/* We might read more data if there's extensions we don't know */
if (read_sve->size < write_sve->size) {
ksft_test_result_fail("Wrote %d bytes, only read %d\n",
write_sve->size, read_sve->size);
ksft_test_result_fail("%s wrote %d bytes, only read %d\n",
type->name, write_sve->size,
read_sve->size);
goto out_read;
}
@@ -349,7 +388,8 @@ static void ptrace_set_sve_get_sve_data(pid_t child, unsigned int vl)
check_u32(vl, "FPCR", write_buf + SVE_PT_SVE_FPCR_OFFSET(vq),
read_buf + SVE_PT_SVE_FPCR_OFFSET(vq), &errors);
ksft_test_result(errors == 0, "Set and get SVE data for VL %u\n", vl);
ksft_test_result(errors == 0, "Set and get %s data for VL %u\n",
type->name, vl);
out_read:
free(read_buf);
@@ -358,7 +398,9 @@ out:
}
/* Validate attempting to set SVE data and read SVE data */
static void ptrace_set_sve_get_fpsimd_data(pid_t child, unsigned int vl)
static void ptrace_set_sve_get_fpsimd_data(pid_t child,
const struct vec_type *type,
unsigned int vl)
{
void *write_buf;
struct user_sve_header *write_sve;
@@ -376,8 +418,8 @@ static void ptrace_set_sve_get_fpsimd_data(pid_t child, unsigned int vl)
data_size = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
write_buf = malloc(data_size);
if (!write_buf) {
ksft_test_result_fail("Error allocating %d byte buffer for VL %u\n",
data_size, vl);
ksft_test_result_fail("Error allocating %d byte buffer for %s VL %u\n",
data_size, type->name, vl);
return;
}
write_sve = write_buf;
@@ -395,16 +437,17 @@ static void ptrace_set_sve_get_fpsimd_data(pid_t child, unsigned int vl)
fill_buf(write_buf + SVE_PT_SVE_FPSR_OFFSET(vq), SVE_PT_SVE_FPSR_SIZE);
fill_buf(write_buf + SVE_PT_SVE_FPCR_OFFSET(vq), SVE_PT_SVE_FPCR_SIZE);
ret = set_sve(child, write_sve);
ret = set_sve(child, type, write_sve);
if (ret != 0) {
ksft_test_result_fail("Failed to set VL %u data\n", vl);
ksft_test_result_fail("Failed to set %s VL %u data\n",
type->name, vl);
goto out;
}
/* Read the data back */
if (get_fpsimd(child, &fpsimd_state)) {
ksft_test_result_fail("Failed to read VL %u FPSIMD data\n",
vl);
ksft_test_result_fail("Failed to read %s VL %u FPSIMD data\n",
type->name, vl);
goto out;
}
@@ -419,7 +462,8 @@ static void ptrace_set_sve_get_fpsimd_data(pid_t child, unsigned int vl)
sizeof(tmp));
if (tmp != fpsimd_state.vregs[i]) {
printf("# Mismatch in FPSIMD for VL %u Z%d\n", vl, i);
printf("# Mismatch in FPSIMD for %s VL %u Z%d\n",
type->name, vl, i);
errors++;
}
}
@@ -429,8 +473,8 @@ static void ptrace_set_sve_get_fpsimd_data(pid_t child, unsigned int vl)
check_u32(vl, "FPCR", write_buf + SVE_PT_SVE_FPCR_OFFSET(vq),
&fpsimd_state.fpcr, &errors);
ksft_test_result(errors == 0, "Set and get FPSIMD data for VL %u\n",
vl);
ksft_test_result(errors == 0, "Set and get FPSIMD data for %s VL %u\n",
type->name, vl);
out:
free(write_buf);
@@ -440,7 +484,7 @@ static int do_parent(pid_t child)
{
int ret = EXIT_FAILURE;
pid_t pid;
int status;
int status, i;
siginfo_t si;
unsigned int vq, vl;
bool vl_supported;
@@ -499,26 +543,47 @@ static int do_parent(pid_t child)
}
}
/* FPSIMD via SVE regset */
ptrace_sve_fpsimd(child);
/* prctl() flags */
ptrace_set_get_inherit(child);
/* Step through every possible VQ */
for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; vq++) {
vl = sve_vl_from_vq(vq);
/* First, try to set this vector length */
ptrace_set_get_vl(child, vl, &vl_supported);
/* If the VL is supported validate data set/get */
if (vl_supported) {
ptrace_set_sve_get_sve_data(child, vl);
ptrace_set_sve_get_fpsimd_data(child, vl);
for (i = 0; i < ARRAY_SIZE(vec_types); i++) {
/* FPSIMD via SVE regset */
if (getauxval(vec_types[i].hwcap_type) & vec_types[i].hwcap) {
ptrace_sve_fpsimd(child, &vec_types[i]);
} else {
ksft_test_result_skip("set SVE get SVE for VL %d\n", vl);
ksft_test_result_skip("set SVE get FPSIMD for VL %d\n", vl);
ksft_test_result_skip("%s FPSIMD get via SVE\n",
vec_types[i].name);
ksft_test_result_skip("%s FPSIMD set via SVE\n",
vec_types[i].name);
ksft_test_result_skip("%s set read via FPSIMD\n",
vec_types[i].name);
}
/* prctl() flags */
ptrace_set_get_inherit(child, &vec_types[i]);
/* Step through every possible VQ */
for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; vq++) {
vl = sve_vl_from_vq(vq);
/* First, try to set this vector length */
if (getauxval(vec_types[i].hwcap_type) &
vec_types[i].hwcap) {
ptrace_set_get_vl(child, &vec_types[i], vl,
&vl_supported);
} else {
ksft_test_result_skip("%s get/set VL %d\n",
vec_types[i].name, vl);
vl_supported = false;
}
/* If the VL is supported validate data set/get */
if (vl_supported) {
ptrace_set_sve_get_sve_data(child, &vec_types[i], vl);
ptrace_set_sve_get_fpsimd_data(child, &vec_types[i], vl);
} else {
ksft_test_result_skip("%s set SVE get SVE for VL %d\n",
vec_types[i].name, vl);
ksft_test_result_skip("%s set SVE get FPSIMD for VL %d\n",
vec_types[i].name, vl);
}
}
}

View File

@@ -310,14 +310,12 @@ int test_setup(struct tdescr *td)
int test_run(struct tdescr *td)
{
if (td->sig_trig) {
if (td->trigger)
return td->trigger(td);
else
return default_trigger(td);
} else {
if (td->trigger)
return td->trigger(td);
else if (td->sig_trig)
return default_trigger(td);
else
return td->run(td, NULL, NULL);
}
}
void test_result(struct tdescr *td)

View File

@@ -1078,7 +1078,7 @@
.errstr_unpriv = "R0 pointer -= pointer prohibited",
},
{
"map access: trying to leak tained dst reg",
"map access: trying to leak tainted dst reg",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),

View File

@@ -221,7 +221,7 @@ int cg_find_unified_root(char *root, size_t len)
int cg_create(const char *cgroup)
{
return mkdir(cgroup, 0644);
return mkdir(cgroup, 0755);
}
int cg_wait_for_proc_count(const char *cgroup, int count)

View File

@@ -1,11 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
#define _GNU_SOURCE
#include <linux/limits.h>
#include <linux/sched.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <errno.h>
#include <signal.h>
@@ -674,6 +677,166 @@ cleanup:
return ret;
}
/*
* cgroup migration permission check should be performed based on the
* credentials at the time of open instead of write.
*/
static int test_cgcore_lesser_euid_open(const char *root)
{
const uid_t test_euid = 65534; /* usually nobody, any !root is fine */
int ret = KSFT_FAIL;
char *cg_test_a = NULL, *cg_test_b = NULL;
char *cg_test_a_procs = NULL, *cg_test_b_procs = NULL;
int cg_test_b_procs_fd = -1;
uid_t saved_uid;
cg_test_a = cg_name(root, "cg_test_a");
cg_test_b = cg_name(root, "cg_test_b");
if (!cg_test_a || !cg_test_b)
goto cleanup;
cg_test_a_procs = cg_name(cg_test_a, "cgroup.procs");
cg_test_b_procs = cg_name(cg_test_b, "cgroup.procs");
if (!cg_test_a_procs || !cg_test_b_procs)
goto cleanup;
if (cg_create(cg_test_a) || cg_create(cg_test_b))
goto cleanup;
if (cg_enter_current(cg_test_a))
goto cleanup;
if (chown(cg_test_a_procs, test_euid, -1) ||
chown(cg_test_b_procs, test_euid, -1))
goto cleanup;
saved_uid = geteuid();
if (seteuid(test_euid))
goto cleanup;
cg_test_b_procs_fd = open(cg_test_b_procs, O_RDWR);
if (seteuid(saved_uid))
goto cleanup;
if (cg_test_b_procs_fd < 0)
goto cleanup;
if (write(cg_test_b_procs_fd, "0", 1) >= 0 || errno != EACCES)
goto cleanup;
ret = KSFT_PASS;
cleanup:
cg_enter_current(root);
if (cg_test_b_procs_fd >= 0)
close(cg_test_b_procs_fd);
if (cg_test_b)
cg_destroy(cg_test_b);
if (cg_test_a)
cg_destroy(cg_test_a);
free(cg_test_b_procs);
free(cg_test_a_procs);
free(cg_test_b);
free(cg_test_a);
return ret;
}
struct lesser_ns_open_thread_arg {
const char *path;
int fd;
int err;
};
static int lesser_ns_open_thread_fn(void *arg)
{
struct lesser_ns_open_thread_arg *targ = arg;
targ->fd = open(targ->path, O_RDWR);
targ->err = errno;
return 0;
}
/*
* cgroup migration permission check should be performed based on the cgroup
* namespace at the time of open instead of write.
*/
static int test_cgcore_lesser_ns_open(const char *root)
{
static char stack[65536];
const uid_t test_euid = 65534; /* usually nobody, any !root is fine */
int ret = KSFT_FAIL;
char *cg_test_a = NULL, *cg_test_b = NULL;
char *cg_test_a_procs = NULL, *cg_test_b_procs = NULL;
int cg_test_b_procs_fd = -1;
struct lesser_ns_open_thread_arg targ = { .fd = -1 };
pid_t pid;
int status;
cg_test_a = cg_name(root, "cg_test_a");
cg_test_b = cg_name(root, "cg_test_b");
if (!cg_test_a || !cg_test_b)
goto cleanup;
cg_test_a_procs = cg_name(cg_test_a, "cgroup.procs");
cg_test_b_procs = cg_name(cg_test_b, "cgroup.procs");
if (!cg_test_a_procs || !cg_test_b_procs)
goto cleanup;
if (cg_create(cg_test_a) || cg_create(cg_test_b))
goto cleanup;
if (cg_enter_current(cg_test_b))
goto cleanup;
if (chown(cg_test_a_procs, test_euid, -1) ||
chown(cg_test_b_procs, test_euid, -1))
goto cleanup;
targ.path = cg_test_b_procs;
pid = clone(lesser_ns_open_thread_fn, stack + sizeof(stack),
CLONE_NEWCGROUP | CLONE_FILES | CLONE_VM | SIGCHLD,
&targ);
if (pid < 0)
goto cleanup;
if (waitpid(pid, &status, 0) < 0)
goto cleanup;
if (!WIFEXITED(status))
goto cleanup;
cg_test_b_procs_fd = targ.fd;
if (cg_test_b_procs_fd < 0)
goto cleanup;
if (cg_enter_current(cg_test_a))
goto cleanup;
if ((status = write(cg_test_b_procs_fd, "0", 1)) >= 0 || errno != ENOENT)
goto cleanup;
ret = KSFT_PASS;
cleanup:
cg_enter_current(root);
if (cg_test_b_procs_fd >= 0)
close(cg_test_b_procs_fd);
if (cg_test_b)
cg_destroy(cg_test_b);
if (cg_test_a)
cg_destroy(cg_test_a);
free(cg_test_b_procs);
free(cg_test_a_procs);
free(cg_test_b);
free(cg_test_a);
return ret;
}
#define T(x) { x, #x }
struct corecg_test {
int (*fn)(const char *root);
@@ -689,6 +852,8 @@ struct corecg_test {
T(test_cgcore_proc_migration),
T(test_cgcore_thread_migration),
T(test_cgcore_destroy),
T(test_cgcore_lesser_euid_open),
T(test_cgcore_lesser_ns_open),
};
#undef T

tools/testing/selftests/net/amt.sh Normal file → Executable file
View File

View File

@@ -193,7 +193,8 @@ for family in 4 6; do
SUFFIX="64 nodad"
VXDEV=vxlan6
IPT=ip6tables
PING="ping6"
# Use ping6 on systems where ping doesn't handle IPv6
ping -w 1 -c 1 ::1 > /dev/null 2>&1 || PING="ping6"
fi
echo "IPv$family"

View File

@@ -45,7 +45,7 @@ $(OUTPUT)/sign_key.o: sign_key.S
$(CC) $(HOST_CFLAGS) -c $< -o $@
$(OUTPUT)/test_encl.elf: test_encl.lds test_encl.c test_encl_bootstrap.S
$(CC) $(ENCL_CFLAGS) -T $^ -o $@
$(CC) $(ENCL_CFLAGS) -T $^ -o $@ -Wl,--build-id=none
EXTRA_CLEAN := \
$(OUTPUT)/test_encl.elf \

View File

@@ -19,13 +19,38 @@
#include "../../../../arch/x86/include/uapi/asm/sgx.h"
enum encl_op_type {
ENCL_OP_PUT,
ENCL_OP_GET,
ENCL_OP_PUT_TO_BUFFER,
ENCL_OP_GET_FROM_BUFFER,
ENCL_OP_PUT_TO_ADDRESS,
ENCL_OP_GET_FROM_ADDRESS,
ENCL_OP_NOP,
ENCL_OP_MAX,
};
struct encl_op {
struct encl_op_header {
uint64_t type;
uint64_t buffer;
};
struct encl_op_put_to_buf {
struct encl_op_header header;
uint64_t value;
};
struct encl_op_get_from_buf {
struct encl_op_header header;
uint64_t value;
};
struct encl_op_put_to_addr {
struct encl_op_header header;
uint64_t value;
uint64_t addr;
};
struct encl_op_get_from_addr {
struct encl_op_header header;
uint64_t value;
uint64_t addr;
};
#endif /* DEFINES_H */

View File

@@ -21,6 +21,8 @@
void encl_delete(struct encl *encl)
{
struct encl_segment *heap_seg = &encl->segment_tbl[encl->nr_segments - 1];
if (encl->encl_base)
munmap((void *)encl->encl_base, encl->encl_size);
@@ -30,6 +32,8 @@ void encl_delete(struct encl *encl)
if (encl->fd)
close(encl->fd);
munmap(heap_seg->src, heap_seg->size);
if (encl->segment_tbl)
free(encl->segment_tbl);
@@ -107,11 +111,14 @@ static bool encl_ioc_add_pages(struct encl *encl, struct encl_segment *seg)
memset(&secinfo, 0, sizeof(secinfo));
secinfo.flags = seg->flags;
ioc.src = (uint64_t)encl->src + seg->offset;
ioc.src = (uint64_t)seg->src;
ioc.offset = seg->offset;
ioc.length = seg->size;
ioc.secinfo = (unsigned long)&secinfo;
ioc.flags = SGX_PAGE_MEASURE;
if (seg->measure)
ioc.flags = SGX_PAGE_MEASURE;
else
ioc.flags = 0;
rc = ioctl(encl->fd, SGX_IOC_ENCLAVE_ADD_PAGES, &ioc);
if (rc < 0) {
@@ -122,11 +129,10 @@ static bool encl_ioc_add_pages(struct encl *encl, struct encl_segment *seg)
return true;
}
bool encl_load(const char *path, struct encl *encl)
bool encl_load(const char *path, struct encl *encl, unsigned long heap_size)
{
const char device_path[] = "/dev/sgx_enclave";
struct encl_segment *seg;
Elf64_Phdr *phdr_tbl;
off_t src_offset;
Elf64_Ehdr *ehdr;
@@ -178,6 +184,8 @@ bool encl_load(const char *path, struct encl *encl)
ehdr = encl->bin;
phdr_tbl = encl->bin + ehdr->e_phoff;
encl->nr_segments = 1; /* one for the heap */
for (i = 0; i < ehdr->e_phnum; i++) {
Elf64_Phdr *phdr = &phdr_tbl[i];
@@ -193,7 +201,6 @@ bool encl_load(const char *path, struct encl *encl)
for (i = 0, j = 0; i < ehdr->e_phnum; i++) {
Elf64_Phdr *phdr = &phdr_tbl[i];
unsigned int flags = phdr->p_flags;
struct encl_segment *seg;
if (phdr->p_type != PT_LOAD)
continue;
@@ -216,6 +223,7 @@ bool encl_load(const char *path, struct encl *encl)
if (j == 0) {
src_offset = phdr->p_offset & PAGE_MASK;
encl->src = encl->bin + src_offset;
seg->prot = PROT_READ | PROT_WRITE;
seg->flags = SGX_PAGE_TYPE_TCS << 8;
@@ -228,15 +236,27 @@ bool encl_load(const char *path, struct encl *encl)
seg->offset = (phdr->p_offset & PAGE_MASK) - src_offset;
seg->size = (phdr->p_filesz + PAGE_SIZE - 1) & PAGE_MASK;
seg->src = encl->src + seg->offset;
seg->measure = true;
j++;
}
assert(j == encl->nr_segments);
assert(j == encl->nr_segments - 1);
encl->src = encl->bin + src_offset;
encl->src_size = encl->segment_tbl[j - 1].offset +
encl->segment_tbl[j - 1].size;
seg = &encl->segment_tbl[j];
seg->offset = encl->segment_tbl[j - 1].offset + encl->segment_tbl[j - 1].size;
seg->size = heap_size;
seg->src = mmap(NULL, heap_size, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
seg->prot = PROT_READ | PROT_WRITE;
seg->flags = (SGX_PAGE_TYPE_REG << 8) | seg->prot;
seg->measure = false;
if (seg->src == MAP_FAILED)
goto err;
encl->src_size = encl->segment_tbl[j].offset + encl->segment_tbl[j].size;
for (encl->encl_size = 4096; encl->encl_size < encl->src_size; )
encl->encl_size <<= 1;

View File

@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-20 Intel Corporation. */
#include <cpuid.h>
#include <elf.h>
#include <errno.h>
#include <fcntl.h>
@@ -21,6 +22,7 @@
#include "main.h"
static const uint64_t MAGIC = 0x1122334455667788ULL;
static const uint64_t MAGIC2 = 0x8877665544332211ULL;
vdso_sgx_enter_enclave_t vdso_sgx_enter_enclave;
struct vdso_symtab {
@@ -107,12 +109,32 @@ static Elf64_Sym *vdso_symtab_get(struct vdso_symtab *symtab, const char *name)
return NULL;
}
/*
* Return the offset in the enclave where the data segment can be found.
* The first RW segment loaded is the TCS, skip that to get info on the
* data segment.
*/
static off_t encl_get_data_offset(struct encl *encl)
{
int i;
for (i = 1; i < encl->nr_segments; i++) {
struct encl_segment *seg = &encl->segment_tbl[i];
if (seg->prot == (PROT_READ | PROT_WRITE))
return seg->offset;
}
return -1;
}
FIXTURE(enclave) {
struct encl encl;
struct sgx_enclave_run run;
};
FIXTURE_SETUP(enclave)
static bool setup_test_encl(unsigned long heap_size, struct encl *encl,
struct __test_metadata *_metadata)
{
Elf64_Sym *sgx_enter_enclave_sym = NULL;
struct vdso_symtab symtab;
@@ -122,31 +144,25 @@ FIXTURE_SETUP(enclave)
unsigned int i;
void *addr;
if (!encl_load("test_encl.elf", &self->encl)) {
encl_delete(&self->encl);
ksft_exit_skip("cannot load enclaves\n");
if (!encl_load("test_encl.elf", encl, heap_size)) {
encl_delete(encl);
TH_LOG("Failed to load the test enclave.\n");
}
for (i = 0; i < self->encl.nr_segments; i++) {
seg = &self->encl.segment_tbl[i];
TH_LOG("0x%016lx 0x%016lx 0x%02x", seg->offset, seg->size, seg->prot);
}
if (!encl_measure(&self->encl))
if (!encl_measure(encl))
goto err;
if (!encl_build(&self->encl))
if (!encl_build(encl))
goto err;
/*
* An enclave consumer only must do this.
*/
for (i = 0; i < self->encl.nr_segments; i++) {
struct encl_segment *seg = &self->encl.segment_tbl[i];
for (i = 0; i < encl->nr_segments; i++) {
struct encl_segment *seg = &encl->segment_tbl[i];
addr = mmap((void *)self->encl.encl_base + seg->offset, seg->size,
seg->prot, MAP_SHARED | MAP_FIXED, self->encl.fd, 0);
addr = mmap((void *)encl->encl_base + seg->offset, seg->size,
seg->prot, MAP_SHARED | MAP_FIXED, encl->fd, 0);
EXPECT_NE(addr, MAP_FAILED);
if (addr == MAP_FAILED)
goto err;
@@ -166,8 +182,16 @@ FIXTURE_SETUP(enclave)
vdso_sgx_enter_enclave = addr + sgx_enter_enclave_sym->st_value;
memset(&self->run, 0, sizeof(self->run));
self->run.tcs = self->encl.encl_base;
return true;
err:
encl_delete(encl);
for (i = 0; i < encl->nr_segments; i++) {
seg = &encl->segment_tbl[i];
TH_LOG("0x%016lx 0x%016lx 0x%02x", seg->offset, seg->size, seg->prot);
}
maps_file = fopen("/proc/self/maps", "r");
if (maps_file != NULL) {
@@ -181,11 +205,13 @@ FIXTURE_SETUP(enclave)
fclose(maps_file);
}
err:
if (!sgx_enter_enclave_sym)
encl_delete(&self->encl);
TH_LOG("Failed to initialize the test enclave.\n");
ASSERT_NE(sgx_enter_enclave_sym, NULL);
return false;
}
FIXTURE_SETUP(enclave)
{
}
FIXTURE_TEARDOWN(enclave)
@@ -215,44 +241,130 @@ FIXTURE_TEARDOWN(enclave)
TEST_F(enclave, unclobbered_vdso)
{
struct encl_op op;
struct encl_op_get_from_buf get_op;
struct encl_op_put_to_buf put_op;
op.type = ENCL_OP_PUT;
op.buffer = MAGIC;
ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
EXPECT_EQ(ENCL_CALL(&op, &self->run, false), 0);
memset(&self->run, 0, sizeof(self->run));
self->run.tcs = self->encl.encl_base;
put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
put_op.value = MAGIC;
EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.user_data, 0);
op.type = ENCL_OP_GET;
op.buffer = 0;
get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
get_op.value = 0;
EXPECT_EQ(ENCL_CALL(&op, &self->run, false), 0);
EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);
EXPECT_EQ(op.buffer, MAGIC);
EXPECT_EQ(get_op.value, MAGIC);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.user_data, 0);
}
TEST_F(enclave, clobbered_vdso)
/*
* A section metric is concatenated in a way that @low bits 12-31 define the
* bits 12-31 of the metric and @high bits 0-19 define the bits 32-51 of the
* metric.
*/
static unsigned long sgx_calc_section_metric(unsigned int low,
unsigned int high)
{
struct encl_op op;
return (low & GENMASK_ULL(31, 12)) +
((high & GENMASK_ULL(19, 0)) << 32);
}
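/*
 * Worked example (illustrative register values, not from real hardware):
 * with ecx = 0x00040000 and edx = 0x00000001,
 *
 *   low  & GENMASK_ULL(31, 12)         = 0x0000000000040000 (256 KiB)
 *   (high & GENMASK_ULL(19, 0)) << 32  = 0x0000000100000000 (4 GiB)
 *
 * so this EPC section would report 4 GiB + 256 KiB of physical SGX memory.
 */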
op.type = ENCL_OP_PUT;
op.buffer = MAGIC;
/*
* Sum total available physical SGX memory across all EPC sections
*
* Return: total available physical SGX memory available on system
*/
static unsigned long get_total_epc_mem(void)
{
unsigned int eax, ebx, ecx, edx;
unsigned long total_size = 0;
unsigned int type;
int section = 0;
EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);
while (true) {
__cpuid_count(SGX_CPUID, section + SGX_CPUID_EPC, eax, ebx, ecx, edx);
type = eax & SGX_CPUID_EPC_MASK;
if (type == SGX_CPUID_EPC_INVALID)
break;
if (type != SGX_CPUID_EPC_SECTION)
break;
total_size += sgx_calc_section_metric(ecx, edx);
section++;
}
return total_size;
}
TEST_F(enclave, unclobbered_vdso_oversubscribed)
{
struct encl_op_get_from_buf get_op;
struct encl_op_put_to_buf put_op;
unsigned long total_mem;
total_mem = get_total_epc_mem();
ASSERT_NE(total_mem, 0);
ASSERT_TRUE(setup_test_encl(total_mem, &self->encl, _metadata));
memset(&self->run, 0, sizeof(self->run));
self->run.tcs = self->encl.encl_base;
put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
put_op.value = MAGIC;
EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.user_data, 0);
op.type = ENCL_OP_GET;
op.buffer = 0;
get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
get_op.value = 0;
EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);
EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);
EXPECT_EQ(op.buffer, MAGIC);
EXPECT_EQ(get_op.value, MAGIC);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.user_data, 0);
}
TEST_F(enclave, clobbered_vdso)
{
struct encl_op_get_from_buf get_op;
struct encl_op_put_to_buf put_op;
ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
memset(&self->run, 0, sizeof(self->run));
self->run.tcs = self->encl.encl_base;
put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
put_op.value = MAGIC;
EXPECT_EQ(ENCL_CALL(&put_op, &self->run, true), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.user_data, 0);
get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
get_op.value = 0;
EXPECT_EQ(ENCL_CALL(&get_op, &self->run, true), 0);
EXPECT_EQ(get_op.value, MAGIC);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.user_data, 0);
}
@@ -267,27 +379,179 @@ static int test_handler(long rdi, long rsi, long rdx, long ursp, long r8, long r
TEST_F(enclave, clobbered_vdso_and_user_function)
{
struct encl_op op;
struct encl_op_get_from_buf get_op;
struct encl_op_put_to_buf put_op;
ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
memset(&self->run, 0, sizeof(self->run));
self->run.tcs = self->encl.encl_base;
self->run.user_handler = (__u64)test_handler;
self->run.user_data = 0xdeadbeef;
op.type = ENCL_OP_PUT;
op.buffer = MAGIC;
put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
put_op.value = MAGIC;
EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);
EXPECT_EQ(ENCL_CALL(&put_op, &self->run, true), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.user_data, 0);
op.type = ENCL_OP_GET;
op.buffer = 0;
get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
get_op.value = 0;
EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);
EXPECT_EQ(ENCL_CALL(&get_op, &self->run, true), 0);
EXPECT_EQ(op.buffer, MAGIC);
EXPECT_EQ(get_op.value, MAGIC);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.user_data, 0);
}
/*
* Sanity check that it is possible to enter either of the two hardcoded TCS
*/
TEST_F(enclave, tcs_entry)
{
struct encl_op_header op;
ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
memset(&self->run, 0, sizeof(self->run));
self->run.tcs = self->encl.encl_base;
op.type = ENCL_OP_NOP;
EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
/* Move to the next TCS. */
self->run.tcs = self->encl.encl_base + PAGE_SIZE;
EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
}
/*
* Second page of .data segment is used to test changing PTE permissions.
* This spans the local encl_buffer within the test enclave.
*
* 1) Start with a sanity check: a value is written to the target page within
* the enclave and read back to ensure target page can be written to.
* 2) Change PTE permissions (RW -> RO) of target page within enclave.
* 3) Repeat (1) - this time expecting a regular #PF communicated via the
* vDSO.
* 4) Change PTE permissions of target page within enclave back to be RW.
* 5) Repeat (1) by resuming enclave, now expected to be possible to write to
* and read from target page within enclave.
*/
TEST_F(enclave, pte_permissions)
{
struct encl_op_get_from_addr get_addr_op;
struct encl_op_put_to_addr put_addr_op;
unsigned long data_start;
int ret;
ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
memset(&self->run, 0, sizeof(self->run));
self->run.tcs = self->encl.encl_base;
data_start = self->encl.encl_base +
encl_get_data_offset(&self->encl) +
PAGE_SIZE;
/*
* Sanity check to ensure it is possible to write to page that will
* have its permissions manipulated.
*/
/* Write MAGIC to page */
put_addr_op.value = MAGIC;
put_addr_op.addr = data_start;
put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
/*
* Read memory that was just written to, confirming that it is the
* value previously written (MAGIC).
*/
get_addr_op.value = 0;
get_addr_op.addr = data_start;
get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
EXPECT_EQ(get_addr_op.value, MAGIC);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
/* Change PTE permissions of target page within the enclave */
ret = mprotect((void *)data_start, PAGE_SIZE, PROT_READ);
if (ret)
perror("mprotect");
/*
* PTE permissions of target page changed to read-only, EPCM
* permissions unchanged (EPCM permissions are RW), attempt to
* write to the page, expecting a regular #PF.
*/
put_addr_op.value = MAGIC2;
EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
EXPECT_EQ(self->run.exception_vector, 14);
EXPECT_EQ(self->run.exception_error_code, 0x7);
EXPECT_EQ(self->run.exception_addr, data_start);
self->run.exception_vector = 0;
self->run.exception_error_code = 0;
self->run.exception_addr = 0;
/*
* Change PTE permissions back to enable enclave to write to the
* target page and resume enclave - do not expect any exceptions this
* time.
*/
ret = mprotect((void *)data_start, PAGE_SIZE, PROT_READ | PROT_WRITE);
if (ret)
perror("mprotect");
EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0,
0, ERESUME, 0, 0, &self->run),
0);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
get_addr_op.value = 0;
EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
EXPECT_EQ(get_addr_op.value, MAGIC2);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.exception_vector, 0);
EXPECT_EQ(self->run.exception_error_code, 0);
EXPECT_EQ(self->run.exception_addr, 0);
}
TEST_HARNESS_MAIN

View File

@@ -6,11 +6,15 @@
#ifndef MAIN_H
#define MAIN_H
#define ENCL_HEAP_SIZE_DEFAULT 4096
struct encl_segment {
void *src;
off_t offset;
size_t size;
unsigned int prot;
unsigned int flags;
bool measure;
};
struct encl {
@@ -31,7 +35,7 @@ extern unsigned char sign_key[];
extern unsigned char sign_key_end[];
void encl_delete(struct encl *ctx);
bool encl_load(const char *path, struct encl *encl);
bool encl_load(const char *path, struct encl *encl, unsigned long heap_size);
bool encl_measure(struct encl *encl);
bool encl_build(struct encl *encl);

View File

@@ -289,15 +289,17 @@ static bool mrenclave_eextend(EVP_MD_CTX *ctx, uint64_t offset,
static bool mrenclave_segment(EVP_MD_CTX *ctx, struct encl *encl,
struct encl_segment *seg)
{
uint64_t end = seg->offset + seg->size;
uint64_t end = seg->size;
uint64_t offset;
for (offset = seg->offset; offset < end; offset += PAGE_SIZE) {
if (!mrenclave_eadd(ctx, offset, seg->flags))
for (offset = 0; offset < end; offset += PAGE_SIZE) {
if (!mrenclave_eadd(ctx, seg->offset + offset, seg->flags))
return false;
if (!mrenclave_eextend(ctx, offset, encl->src + offset))
return false;
if (seg->measure) {
if (!mrenclave_eextend(ctx, seg->offset + offset, seg->src + offset))
return false;
}
}
return true;

View File

@@ -4,6 +4,11 @@
#include <stddef.h>
#include "defines.h"
/*
* Data buffer spanning two pages that will be placed first in .data
* segment. Even if not used internally the second page is needed by
* external test manipulating page permissions.
*/
static uint8_t encl_buffer[8192] = { 1 };
static void *memcpy(void *dest, const void *src, size_t n)
@@ -16,20 +21,51 @@ static void *memcpy(void *dest, const void *src, size_t n)
return dest;
}
static void do_encl_op_put_to_buf(void *op)
{
struct encl_op_put_to_buf *op2 = op;
memcpy(&encl_buffer[0], &op2->value, 8);
}
static void do_encl_op_get_from_buf(void *op)
{
struct encl_op_get_from_buf *op2 = op;
memcpy(&op2->value, &encl_buffer[0], 8);
}
static void do_encl_op_put_to_addr(void *_op)
{
struct encl_op_put_to_addr *op = _op;
memcpy((void *)op->addr, &op->value, 8);
}
static void do_encl_op_get_from_addr(void *_op)
{
struct encl_op_get_from_addr *op = _op;
memcpy(&op->value, (void *)op->addr, 8);
}
static void do_encl_op_nop(void *_op)
{
}
void encl_body(void *rdi, void *rsi)
{
struct encl_op *op = (struct encl_op *)rdi;
const void (*encl_op_array[ENCL_OP_MAX])(void *) = {
do_encl_op_put_to_buf,
do_encl_op_get_from_buf,
do_encl_op_put_to_addr,
do_encl_op_get_from_addr,
do_encl_op_nop,
};
switch (op->type) {
case ENCL_OP_PUT:
memcpy(&encl_buffer[0], &op->buffer, 8);
break;
struct encl_op_header *op = (struct encl_op_header *)rdi;
case ENCL_OP_GET:
memcpy(&op->buffer, &encl_buffer[0], 8);
break;
default:
break;
}
if (op->type < ENCL_OP_MAX)
(*encl_op_array[op->type])(op);
}

View File

@@ -12,7 +12,7 @@
.fill 1, 8, 0 # STATE (set by CPU)
.fill 1, 8, 0 # FLAGS
.quad encl_ssa # OSSA
.quad encl_ssa_tcs1 # OSSA
.fill 1, 4, 0 # CSSA (set by CPU)
.fill 1, 4, 1 # NSSA
.quad encl_entry # OENTRY
@@ -23,10 +23,10 @@
.fill 1, 4, 0xFFFFFFFF # GSLIMIT
.fill 4024, 1, 0 # Reserved
# Identical to the previous TCS.
# TCS2
.fill 1, 8, 0 # STATE (set by CPU)
.fill 1, 8, 0 # FLAGS
.quad encl_ssa # OSSA
.quad encl_ssa_tcs2 # OSSA
.fill 1, 4, 0 # CSSA (set by CPU)
.fill 1, 4, 1 # NSSA
.quad encl_entry # OENTRY
@@ -40,8 +40,9 @@
.text
encl_entry:
# RBX contains the base address for TCS, which is also the first address
# inside the enclave. By adding the value of le_stack_end to it, we get
# RBX contains the base address for TCS, which is the first address
# inside the enclave for TCS #1 and one page into the enclave for
# TCS #2. By adding the value of encl_stack to it, we get
# the absolute address for the stack.
lea (encl_stack)(%rbx), %rax
xchg %rsp, %rax
@@ -81,9 +82,15 @@ encl_entry:
.section ".data", "aw"
encl_ssa:
encl_ssa_tcs1:
.space 4096
encl_ssa_tcs2:
.space 4096
.balign 4096
.space 8192
# Stack of TCS #1
.space 4096
encl_stack:
.balign 4096
# Stack of TCS #2
.space 4096