diff --git a/CMakeLists.txt b/CMakeLists.txt
index af34214bae..5055ea4d36 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -12,7 +12,7 @@ if(NOT CMAKE_BUILD_TYPE)
endif()
if(NOT CMAKE_INSTALL_PREFIX)
- set(CMAKE_INSTALL_PREFIX "${CMAKE_BINARY_DIR}/stage1" CACHE STRING
+ set(CMAKE_INSTALL_PREFIX "${CMAKE_BINARY_DIR}/stage3" CACHE STRING
"Directory to install zig to" FORCE)
endif()
@@ -65,6 +65,9 @@ if("${ZIG_VERSION}" STREQUAL "")
endif()
message(STATUS "Configuring zig version ${ZIG_VERSION}")
+set(ZIG_SKIP_INSTALL_LIB_FILES off CACHE BOOL
+ "Disable copying lib/ files to install prefix during the build phase")
+
set(ZIG_STATIC off CACHE BOOL "Attempt to build a static zig executable (not compatible with glibc)")
set(ZIG_SHARED_LLVM off CACHE BOOL "Prefer linking against shared LLVM libraries")
set(ZIG_STATIC_LLVM off CACHE BOOL "Prefer linking against static LLVM libraries")
@@ -333,7 +336,7 @@ set(ZIG_CONFIG_H_OUT "${CMAKE_BINARY_DIR}/config.h")
set(ZIG_CONFIG_ZIG_OUT "${CMAKE_BINARY_DIR}/config.zig")
# This is our shim which will be replaced by stage1.zig.
-set(ZIG0_SOURCES
+set(ZIG1_SOURCES
"${CMAKE_SOURCE_DIR}/src/stage1/zig0.cpp"
)
@@ -373,9 +376,9 @@ set(ZIG_CPP_SOURCES
# https://github.com/ziglang/zig/issues/6363
"${CMAKE_SOURCE_DIR}/src/windows_sdk.cpp"
)
-# Needed because we use cmake, not the zig build system, to build zig1.o.
+# Needed because we use cmake, not the zig build system, to build zig2.o.
# This list is generated by building zig and then clearing the zig-cache directory,
-# then manually running the build-obj command (see BUILD_ZIG1_ARGS), and then looking
+# then manually running the build-obj command (see BUILD_ZIG2_ARGS), and then looking
# in the zig-cache directory for the compiler-generated list of zig file dependencies.
set(ZIG_STAGE2_SOURCES
"${ZIG_CONFIG_ZIG_OUT}"
@@ -942,40 +945,51 @@ if(MSVC OR MINGW)
endif()
if("${ZIG_EXECUTABLE}" STREQUAL "")
- add_executable(zig0 ${ZIG0_SOURCES})
- set_target_properties(zig0 PROPERTIES
+ add_executable(zig1 ${ZIG1_SOURCES})
+ set_target_properties(zig1 PROPERTIES
COMPILE_FLAGS ${EXE_CFLAGS}
LINK_FLAGS ${EXE_LDFLAGS}
)
- target_link_libraries(zig0 zigstage1)
+ target_link_libraries(zig1 zigstage1)
endif()
if(MSVC)
- set(ZIG1_OBJECT "${CMAKE_BINARY_DIR}/zig1.obj")
+ set(ZIG2_OBJECT "${CMAKE_BINARY_DIR}/zig2.obj")
else()
- set(ZIG1_OBJECT "${CMAKE_BINARY_DIR}/zig1.o")
+ set(ZIG2_OBJECT "${CMAKE_BINARY_DIR}/zig2.o")
endif()
if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
- set(ZIG1_RELEASE_ARG "")
+ set(ZIG_RELEASE_ARG "")
+elseif("${CMAKE_BUILD_TYPE}" STREQUAL "RelWithDebInfo")
+ set(ZIG_RELEASE_ARG -Drelease)
else()
- set(ZIG1_RELEASE_ARG -OReleaseFast --strip)
+ set(ZIG_RELEASE_ARG -Drelease -Dstrip)
+endif()
+if(ZIG_SKIP_INSTALL_LIB_FILES)
+ set(ZIG_SKIP_INSTALL_LIB_FILES_ARG "-Dskip-install-lib-files")
+else()
+ set(ZIG_SKIP_INSTALL_LIB_FILES_ARG "-Dskip-install-lib-files=false")
endif()
if(ZIG_SINGLE_THREADED)
- set(ZIG1_SINGLE_THREADED_ARG "-fsingle-threaded")
+ set(ZIG_SINGLE_THREADED_ARG "-fsingle-threaded")
else()
- set(ZIG1_SINGLE_THREADED_ARG "")
+ set(ZIG_SINGLE_THREADED_ARG "")
+endif()
+if(ZIG_STATIC)
+ set(ZIG_STATIC_ARG "-Duse-zig-libcxx")
+else()
+ set(ZIG_STATIC_ARG "")
endif()
-set(BUILD_ZIG1_ARGS
+set(BUILD_ZIG2_ARGS
"src/stage1.zig"
- -target "${ZIG_TARGET_TRIPLE}"
- "-mcpu=${ZIG_TARGET_MCPU}"
- --name zig1
+ --name zig2
--zig-lib-dir "${CMAKE_SOURCE_DIR}/lib"
- "-femit-bin=${ZIG1_OBJECT}"
+ "-femit-bin=${ZIG2_OBJECT}"
-fcompiler-rt
- "${ZIG1_RELEASE_ARG}"
- "${ZIG1_SINGLE_THREADED_ARG}"
+ ${ZIG_SINGLE_THREADED_ARG}
+ -target "${ZIG_TARGET_TRIPLE}"
+ -mcpu "${ZIG_TARGET_MCPU}"
-lc
--pkg-begin build_options "${ZIG_CONFIG_ZIG_OUT}"
--pkg-end
@@ -985,68 +999,64 @@ set(BUILD_ZIG1_ARGS
if("${ZIG_EXECUTABLE}" STREQUAL "")
add_custom_command(
- OUTPUT "${ZIG1_OBJECT}"
- COMMAND zig0 ${BUILD_ZIG1_ARGS}
- DEPENDS zig0 "${ZIG_STAGE2_SOURCES}"
- COMMENT STATUS "Building self-hosted component ${ZIG1_OBJECT}"
- WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
+ OUTPUT "${ZIG2_OBJECT}"
+ COMMAND zig1 ${BUILD_ZIG2_ARGS}
+ DEPENDS zig1 "${ZIG_STAGE2_SOURCES}"
+ COMMENT STATUS "Building stage2 object ${ZIG2_OBJECT}"
+ WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
)
- set(ZIG_EXECUTABLE "${zig_BINARY_DIR}/zig")
if (WIN32)
- set(ZIG_EXECUTABLE "${ZIG_EXECUTABLE}.exe")
+ set(ZIG_EXECUTABLE "${zig2_BINARY_DIR}/zig2.exe")
+ else()
+ set(ZIG_EXECUTABLE "${zig2_BINARY_DIR}/zig2")
endif()
else()
add_custom_command(
- OUTPUT "${ZIG1_OBJECT}"
- COMMAND "${ZIG_EXECUTABLE}" "build-obj" ${BUILD_ZIG1_ARGS}
- DEPENDS ${ZIG_STAGE2_SOURCES}
- COMMENT STATUS "Building self-hosted component ${ZIG1_OBJECT}"
- WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
+ OUTPUT "${ZIG2_OBJECT}"
+ COMMAND "${ZIG_EXECUTABLE}" "build-obj" ${BUILD_ZIG2_ARGS}
+ DEPENDS ${ZIG_STAGE2_SOURCES}
+ COMMENT STATUS "Building stage2 component ${ZIG2_OBJECT}"
+ WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
)
endif()
# cmake won't let us configure an executable without C sources.
-add_executable(zig "${CMAKE_SOURCE_DIR}/src/stage1/empty.cpp" "${ZIG1_OBJECT}")
+add_executable(zig2 "${CMAKE_SOURCE_DIR}/src/stage1/empty.cpp" "${ZIG2_OBJECT}")
-set_target_properties(zig PROPERTIES
+set_target_properties(zig2 PROPERTIES
COMPILE_FLAGS ${EXE_CFLAGS}
LINK_FLAGS ${EXE_LDFLAGS}
)
-target_link_libraries(zig zigstage1)
+target_link_libraries(zig2 zigstage1)
if(MSVC)
- target_link_libraries(zig ntdll.lib)
+ target_link_libraries(zig2 ntdll.lib)
elseif(MINGW)
- target_link_libraries(zig ntdll)
+ target_link_libraries(zig2 ntdll)
endif()
-install(TARGETS zig DESTINATION bin)
-
-set(ZIG_SKIP_INSTALL_LIB_FILES off CACHE BOOL
- "Disable copying lib/ files to install prefix during the build phase")
-
+# Dummy install command so that the "install" target is not missing.
+# This is redundant with the "stage3" custom target below.
if(NOT ZIG_SKIP_INSTALL_LIB_FILES)
- set(ZIG_INSTALL_ARGS "build"
- --zig-lib-dir "${CMAKE_SOURCE_DIR}/lib"
- "-Dlib-files-only"
- --prefix "${CMAKE_INSTALL_PREFIX}"
- "-Dconfig_h=${ZIG_CONFIG_H_OUT}"
- install
- )
-
- # CODE has no effect with Visual Studio build system generator, therefore
- # when using Visual Studio build system generator we resort to running
- # `zig build install` during the build phase.
- if(MSVC)
- add_custom_target(zig_install_lib_files ALL
- COMMAND zig ${ZIG_INSTALL_ARGS}
- DEPENDS zig
- WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
- )
- else()
- get_target_property(zig_BINARY_DIR zig BINARY_DIR)
- install(CODE "set(zig_EXE \"${ZIG_EXECUTABLE}\")")
- install(CODE "set(ZIG_INSTALL_ARGS \"${ZIG_INSTALL_ARGS}\")")
- install(CODE "set(CMAKE_SOURCE_DIR \"${CMAKE_SOURCE_DIR}\")")
- install(SCRIPT ${CMAKE_CURRENT_SOURCE_DIR}/cmake/install.cmake)
- endif()
+ install(FILES "lib/compiler_rt.zig" DESTINATION "lib/zig")
endif()
+
+set(ZIG_INSTALL_ARGS "build"
+ --zig-lib-dir "${CMAKE_SOURCE_DIR}/lib"
+ --prefix "${CMAKE_INSTALL_PREFIX}"
+ "-Dconfig_h=${ZIG_CONFIG_H_OUT}"
+ "-Denable-llvm"
+ "-Denable-stage1"
+ ${ZIG_RELEASE_ARG}
+ ${ZIG_STATIC_ARG}
+ ${ZIG_SKIP_INSTALL_LIB_FILES_ARG}
+ ${ZIG_SINGLE_THREADED_ARG}
+ "-Dtarget=${ZIG_TARGET_TRIPLE}"
+ "-Dcpu=${ZIG_TARGET_MCPU}"
+)
+
+add_custom_target(stage3 ALL
+ COMMAND zig2 ${ZIG_INSTALL_ARGS}
+ DEPENDS zig2
+ COMMENT STATUS "Building stage3"
+ WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
+)
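
(Not part of the patch; an orientation sketch only.) With the renames above, a
from-source bootstrap driven by this CMakeLists.txt proceeds roughly as follows;
the generator and directory names are illustrative assumptions:

    mkdir build && cd build
    cmake ..        # CMAKE_INSTALL_PREFIX now defaults to ${CMAKE_BINARY_DIR}/stage3
    make            # builds the zig1 C++ shim, uses it to emit zig2.o from
                    # src/stage1.zig, links the zig2 executable, and then the
                    # "stage3" custom target runs zig2 with ZIG_INSTALL_ARGS to
                    # build and install the final compiler into the prefix
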
diff --git a/build.zig b/build.zig
index 4ecb7f9a4f..fc89b54023 100644
--- a/build.zig
+++ b/build.zig
@@ -15,6 +15,7 @@ const stack_size = 32 * 1024 * 1024;
pub fn build(b: *Builder) !void {
b.setPreferredReleaseMode(.ReleaseFast);
+ const test_step = b.step("test", "Run all the tests");
const mode = b.standardReleaseOptions();
const target = b.standardTargetOptions(.{});
const single_threaded = b.option(bool, "single-threaded", "Build artifacts that run in single threaded mode");
@@ -39,8 +40,6 @@ pub fn build(b: *Builder) !void {
const docs_step = b.step("docs", "Build documentation");
docs_step.dependOn(&docgen_cmd.step);
- const toolchain_step = b.step("test-toolchain", "Run the tests for the toolchain");
-
var test_cases = b.addTest("src/test.zig");
test_cases.stack_size = stack_size;
test_cases.setBuildMode(mode);
@@ -64,10 +63,9 @@ pub fn build(b: *Builder) !void {
const only_install_lib_files = b.option(bool, "lib-files-only", "Only install library files") orelse false;
- const is_stage1 = b.option(bool, "stage1", "Build the stage1 compiler, put stage2 behind a feature flag") orelse false;
- const omit_stage2 = b.option(bool, "omit-stage2", "Do not include stage2 behind a feature flag inside stage1") orelse false;
+ const have_stage1 = b.option(bool, "enable-stage1", "Include the stage1 compiler behind a feature flag") orelse false;
const static_llvm = b.option(bool, "static-llvm", "Disable integration with system-installed LLVM, Clang, LLD, and libc++") orelse false;
- const enable_llvm = b.option(bool, "enable-llvm", "Build self-hosted compiler with LLVM backend enabled") orelse (is_stage1 or static_llvm);
+ const enable_llvm = b.option(bool, "enable-llvm", "Build self-hosted compiler with LLVM backend enabled") orelse (have_stage1 or static_llvm);
const llvm_has_m68k = b.option(
bool,
"llvm-has-m68k",
@@ -137,7 +135,7 @@ pub fn build(b: *Builder) !void {
};
const main_file: ?[]const u8 = mf: {
- if (!is_stage1) break :mf "src/main.zig";
+ if (!have_stage1) break :mf "src/main.zig";
if (use_zig0) break :mf null;
break :mf "src/stage1.zig";
};
@@ -150,7 +148,7 @@ pub fn build(b: *Builder) !void {
exe.setBuildMode(mode);
exe.setTarget(target);
if (!skip_stage2_tests) {
- toolchain_step.dependOn(&exe.step);
+ test_step.dependOn(&exe.step);
}
b.default_step.dependOn(&exe.step);
@@ -248,7 +246,7 @@ pub fn build(b: *Builder) !void {
}
};
- if (is_stage1) {
+ if (have_stage1) {
const softfloat = b.addStaticLibrary("softfloat", null);
softfloat.setBuildMode(.ReleaseFast);
softfloat.setTarget(target);
@@ -360,8 +358,7 @@ pub fn build(b: *Builder) !void {
exe_options.addOption(bool, "enable_tracy_callstack", tracy_callstack);
exe_options.addOption(bool, "enable_tracy_allocation", tracy_allocation);
exe_options.addOption(bool, "value_tracing", value_tracing);
- exe_options.addOption(bool, "is_stage1", is_stage1);
- exe_options.addOption(bool, "omit_stage2", omit_stage2);
+ exe_options.addOption(bool, "have_stage1", have_stage1);
if (tracy) |tracy_path| {
const client_cpp = fs.path.join(
b.allocator,
@@ -396,8 +393,7 @@ pub fn build(b: *Builder) !void {
test_cases_options.addOption(bool, "enable_link_snapshots", enable_link_snapshots);
test_cases_options.addOption(bool, "skip_non_native", skip_non_native);
test_cases_options.addOption(bool, "skip_stage1", skip_stage1);
- test_cases_options.addOption(bool, "is_stage1", is_stage1);
- test_cases_options.addOption(bool, "omit_stage2", omit_stage2);
+ test_cases_options.addOption(bool, "have_stage1", have_stage1);
test_cases_options.addOption(bool, "have_llvm", enable_llvm);
test_cases_options.addOption(bool, "llvm_has_m68k", llvm_has_m68k);
test_cases_options.addOption(bool, "llvm_has_csky", llvm_has_csky);
@@ -418,7 +414,7 @@ pub fn build(b: *Builder) !void {
const test_cases_step = b.step("test-cases", "Run the main compiler test cases");
test_cases_step.dependOn(&test_cases.step);
if (!skip_stage2_tests) {
- toolchain_step.dependOn(test_cases_step);
+ test_step.dependOn(test_cases_step);
}
var chosen_modes: [4]builtin.Mode = undefined;
@@ -442,11 +438,11 @@ pub fn build(b: *Builder) !void {
const modes = chosen_modes[0..chosen_mode_index];
// run stage1 `zig fmt` on this build.zig file just to make sure it works
- toolchain_step.dependOn(&fmt_build_zig.step);
+ test_step.dependOn(&fmt_build_zig.step);
const fmt_step = b.step("test-fmt", "Run zig fmt against build.zig to make sure it works");
fmt_step.dependOn(&fmt_build_zig.step);
- toolchain_step.dependOn(tests.addPkgTests(
+ test_step.dependOn(tests.addPkgTests(
b,
test_filter,
"test/behavior.zig",
@@ -457,11 +453,10 @@ pub fn build(b: *Builder) !void {
skip_non_native,
skip_libc,
skip_stage1,
- omit_stage2,
- is_stage1,
+ skip_stage2_tests,
));
- toolchain_step.dependOn(tests.addPkgTests(
+ test_step.dependOn(tests.addPkgTests(
b,
test_filter,
"lib/compiler_rt.zig",
@@ -472,11 +467,10 @@ pub fn build(b: *Builder) !void {
skip_non_native,
true, // skip_libc
skip_stage1,
- omit_stage2 or true, // TODO get these all passing
- is_stage1,
+ skip_stage2_tests or true, // TODO get these all passing
));
- toolchain_step.dependOn(tests.addPkgTests(
+ test_step.dependOn(tests.addPkgTests(
b,
test_filter,
"lib/c.zig",
@@ -487,37 +481,36 @@ pub fn build(b: *Builder) !void {
skip_non_native,
true, // skip_libc
skip_stage1,
- omit_stage2 or true, // TODO get these all passing
- is_stage1,
+ skip_stage2_tests or true, // TODO get these all passing
));
- toolchain_step.dependOn(tests.addCompareOutputTests(b, test_filter, modes));
- toolchain_step.dependOn(tests.addStandaloneTests(
+ test_step.dependOn(tests.addCompareOutputTests(b, test_filter, modes));
+ test_step.dependOn(tests.addStandaloneTests(
b,
test_filter,
modes,
skip_non_native,
enable_macos_sdk,
target,
- omit_stage2,
+ skip_stage2_tests,
b.enable_darling,
b.enable_qemu,
b.enable_rosetta,
b.enable_wasmtime,
b.enable_wine,
));
- toolchain_step.dependOn(tests.addLinkTests(b, test_filter, modes, enable_macos_sdk, omit_stage2));
- toolchain_step.dependOn(tests.addStackTraceTests(b, test_filter, modes));
- toolchain_step.dependOn(tests.addCliTests(b, test_filter, modes));
- toolchain_step.dependOn(tests.addAssembleAndLinkTests(b, test_filter, modes));
- toolchain_step.dependOn(tests.addTranslateCTests(b, test_filter));
+ test_step.dependOn(tests.addLinkTests(b, test_filter, modes, enable_macos_sdk, skip_stage2_tests));
+ test_step.dependOn(tests.addStackTraceTests(b, test_filter, modes));
+ test_step.dependOn(tests.addCliTests(b, test_filter, modes));
+ test_step.dependOn(tests.addAssembleAndLinkTests(b, test_filter, modes));
+ test_step.dependOn(tests.addTranslateCTests(b, test_filter));
if (!skip_run_translated_c) {
- toolchain_step.dependOn(tests.addRunTranslatedCTests(b, test_filter, target));
+ test_step.dependOn(tests.addRunTranslatedCTests(b, test_filter, target));
}
// tests for this feature are disabled until we have the self-hosted compiler available
- // toolchain_step.dependOn(tests.addGenHTests(b, test_filter));
+ // test_step.dependOn(tests.addGenHTests(b, test_filter));
- const std_step = tests.addPkgTests(
+ test_step.dependOn(tests.addPkgTests(
b,
test_filter,
"lib/std/std.zig",
@@ -528,14 +521,8 @@ pub fn build(b: *Builder) !void {
skip_non_native,
skip_libc,
skip_stage1,
- omit_stage2 or true, // TODO get these all passing
- is_stage1,
- );
-
- const test_step = b.step("test", "Run all the tests");
- test_step.dependOn(toolchain_step);
- test_step.dependOn(std_step);
- test_step.dependOn(docs_step);
+ true, // TODO get these all passing
+ ));
}
const exe_cflags = [_][]const u8{
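
(Not part of the patch; an orientation sketch only.) With the old test-toolchain
umbrella folded into the top-level step, the steps defined in this build.zig are
invoked as follows; the invocations are illustrative:

    zig build test          # the single unified "test" step
    zig build test-cases    # only the main compiler test cases
    zig build test-fmt      # only `zig fmt` on build.zig
    zig build docs          # documentation remains its own step
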
diff --git a/ci/azure/build.zig b/ci/azure/build.zig
deleted file mode 100644
index 3fec555321..0000000000
--- a/ci/azure/build.zig
+++ /dev/null
@@ -1,976 +0,0 @@
-const std = @import("std");
-const builtin = std.builtin;
-const Builder = std.build.Builder;
-const BufMap = std.BufMap;
-const mem = std.mem;
-const ArrayList = std.ArrayList;
-const io = std.io;
-const fs = std.fs;
-const InstallDirectoryOptions = std.build.InstallDirectoryOptions;
-const assert = std.debug.assert;
-
-const zig_version = std.builtin.Version{ .major = 0, .minor = 10, .patch = 0 };
-
-pub fn build(b: *Builder) !void {
- b.setPreferredReleaseMode(.ReleaseFast);
- const mode = b.standardReleaseOptions();
- const target = b.standardTargetOptions(.{});
- const single_threaded = b.option(bool, "single-threaded", "Build artifacts that run in single threaded mode");
- const use_zig_libcxx = b.option(bool, "use-zig-libcxx", "If libc++ is needed, use zig's bundled version, don't try to integrate with the system") orelse false;
-
- const docgen_exe = b.addExecutable("docgen", "doc/docgen.zig");
- docgen_exe.single_threaded = single_threaded;
-
- const rel_zig_exe = try fs.path.relative(b.allocator, b.build_root, b.zig_exe);
- const langref_out_path = fs.path.join(
- b.allocator,
- &[_][]const u8{ b.cache_root, "langref.html" },
- ) catch unreachable;
- const docgen_cmd = docgen_exe.run();
- docgen_cmd.addArgs(&[_][]const u8{
- rel_zig_exe,
- "doc" ++ fs.path.sep_str ++ "langref.html.in",
- langref_out_path,
- });
- docgen_cmd.step.dependOn(&docgen_exe.step);
-
- const docs_step = b.step("docs", "Build documentation");
- docs_step.dependOn(&docgen_cmd.step);
-
- const is_stage1 = b.option(bool, "stage1", "Build the stage1 compiler, put stage2 behind a feature flag") orelse false;
- const omit_stage2 = b.option(bool, "omit-stage2", "Do not include stage2 behind a feature flag inside stage1") orelse false;
- const static_llvm = b.option(bool, "static-llvm", "Disable integration with system-installed LLVM, Clang, LLD, and libc++") orelse false;
- const enable_llvm = b.option(bool, "enable-llvm", "Build self-hosted compiler with LLVM backend enabled") orelse (is_stage1 or static_llvm);
- const llvm_has_m68k = b.option(
- bool,
- "llvm-has-m68k",
- "Whether LLVM has the experimental target m68k enabled",
- ) orelse false;
- const llvm_has_csky = b.option(
- bool,
- "llvm-has-csky",
- "Whether LLVM has the experimental target csky enabled",
- ) orelse false;
- const llvm_has_arc = b.option(
- bool,
- "llvm-has-arc",
- "Whether LLVM has the experimental target arc enabled",
- ) orelse false;
- const config_h_path_option = b.option([]const u8, "config_h", "Path to the generated config.h");
-
- b.installDirectory(InstallDirectoryOptions{
- .source_dir = "lib",
- .install_dir = .lib,
- .install_subdir = "zig",
- .exclude_extensions = &[_][]const u8{
- // exclude files from lib/std/compress/
- ".gz",
- ".z.0",
- ".z.9",
- "rfc1951.txt",
- "rfc1952.txt",
- // exclude files from lib/std/compress/deflate/testdata
- ".expect",
- ".expect-noinput",
- ".golden",
- ".input",
- "compress-e.txt",
- "compress-gettysburg.txt",
- "compress-pi.txt",
- "rfc1951.txt",
- // exclude files from lib/std/tz/
- ".tzif",
- // others
- "README.md",
- },
- .blank_extensions = &[_][]const u8{
- "test.zig",
- },
- });
-
- const tracy = b.option([]const u8, "tracy", "Enable Tracy integration. Supply path to Tracy source");
- const tracy_callstack = b.option(bool, "tracy-callstack", "Include callstack information with Tracy data. Does nothing if -Dtracy is not provided") orelse false;
- const tracy_allocation = b.option(bool, "tracy-allocation", "Include allocation information with Tracy data. Does nothing if -Dtracy is not provided") orelse false;
- const force_gpa = b.option(bool, "force-gpa", "Force the compiler to use GeneralPurposeAllocator") orelse false;
- const link_libc = b.option(bool, "force-link-libc", "Force self-hosted compiler to link libc") orelse enable_llvm;
- const strip = b.option(bool, "strip", "Omit debug information") orelse false;
- const value_tracing = b.option(bool, "value-tracing", "Enable extra state tracking to help troubleshoot bugs in the compiler (using the std.debug.Trace API)") orelse false;
-
- const mem_leak_frames: u32 = b.option(u32, "mem-leak-frames", "How many stack frames to print when a memory leak occurs. Tests get 2x this amount.") orelse blk: {
- if (strip) break :blk @as(u32, 0);
- if (mode != .Debug) break :blk 0;
- break :blk 4;
- };
-
- const main_file: ?[]const u8 = if (is_stage1) null else "src/main.zig";
-
- const exe = b.addExecutable("zig", main_file);
- exe.strip = strip;
- exe.install();
- exe.setBuildMode(mode);
- exe.setTarget(target);
-
- b.default_step.dependOn(&exe.step);
- exe.single_threaded = single_threaded;
-
- if (target.isWindows() and target.getAbi() == .gnu) {
- // LTO is currently broken on mingw, this can be removed when it's fixed.
- exe.want_lto = false;
- }
-
- const exe_options = b.addOptions();
- exe.addOptions("build_options", exe_options);
-
- exe_options.addOption(u32, "mem_leak_frames", mem_leak_frames);
- exe_options.addOption(bool, "skip_non_native", false);
- exe_options.addOption(bool, "have_llvm", enable_llvm);
- exe_options.addOption(bool, "llvm_has_m68k", llvm_has_m68k);
- exe_options.addOption(bool, "llvm_has_csky", llvm_has_csky);
- exe_options.addOption(bool, "llvm_has_arc", llvm_has_arc);
- exe_options.addOption(bool, "force_gpa", force_gpa);
-
- if (link_libc) {
- exe.linkLibC();
- }
-
- const is_debug = mode == .Debug;
- const enable_logging = b.option(bool, "log", "Enable debug logging with --debug-log") orelse is_debug;
- const enable_link_snapshots = b.option(bool, "link-snapshot", "Whether to enable linker state snapshots") orelse false;
-
- const opt_version_string = b.option([]const u8, "version-string", "Override Zig version string. Default is to find out with git.");
- const version = if (opt_version_string) |version| version else v: {
- const version_string = b.fmt("{d}.{d}.{d}", .{ zig_version.major, zig_version.minor, zig_version.patch });
-
- var code: u8 = undefined;
- const git_describe_untrimmed = b.execAllowFail(&[_][]const u8{
- "git", "-C", b.build_root, "describe", "--match", "*.*.*", "--tags",
- }, &code, .Ignore) catch {
- break :v version_string;
- };
- const git_describe = mem.trim(u8, git_describe_untrimmed, " \n\r");
-
- switch (mem.count(u8, git_describe, "-")) {
- 0 => {
- // Tagged release version (e.g. 0.9.0).
- if (!mem.eql(u8, git_describe, version_string)) {
- std.debug.print("Zig version '{s}' does not match Git tag '{s}'\n", .{ version_string, git_describe });
- std.process.exit(1);
- }
- break :v version_string;
- },
- 2 => {
- // Untagged development build (e.g. 0.9.0-dev.2025+ecf0050a9).
- var it = mem.split(u8, git_describe, "-");
- const tagged_ancestor = it.next() orelse unreachable;
- const commit_height = it.next() orelse unreachable;
- const commit_id = it.next() orelse unreachable;
-
- const ancestor_ver = try std.builtin.Version.parse(tagged_ancestor);
- if (zig_version.order(ancestor_ver) != .gt) {
- std.debug.print("Zig version '{}' must be greater than tagged ancestor '{}'\n", .{ zig_version, ancestor_ver });
- std.process.exit(1);
- }
-
- // Check that the commit hash is prefixed with a 'g' (a Git convention).
- if (commit_id.len < 1 or commit_id[0] != 'g') {
- std.debug.print("Unexpected `git describe` output: {s}\n", .{git_describe});
- break :v version_string;
- }
-
- // The version is reformatted in accordance with the https://semver.org specification.
- break :v b.fmt("{s}-dev.{s}+{s}", .{ version_string, commit_height, commit_id[1..] });
- },
- else => {
- std.debug.print("Unexpected `git describe` output: {s}\n", .{git_describe});
- break :v version_string;
- },
- }
- };
- exe_options.addOption([:0]const u8, "version", try b.allocator.dupeZ(u8, version));
-
- if (enable_llvm) {
- const cmake_cfg = if (static_llvm) null else findAndParseConfigH(b, config_h_path_option);
-
- if (is_stage1) {
- const softfloat = b.addStaticLibrary("softfloat", null);
- softfloat.setBuildMode(.ReleaseFast);
- softfloat.setTarget(target);
- softfloat.addIncludeDir("deps/SoftFloat-3e-prebuilt");
- softfloat.addIncludeDir("deps/SoftFloat-3e/source/8086");
- softfloat.addIncludeDir("deps/SoftFloat-3e/source/include");
- softfloat.addCSourceFiles(&softfloat_sources, &[_][]const u8{ "-std=c99", "-O3" });
- softfloat.single_threaded = single_threaded;
-
- const zig0 = b.addExecutable("zig0", null);
- zig0.addCSourceFiles(&.{"src/stage1/zig0.cpp"}, &exe_cflags);
- zig0.addIncludeDir("zig-cache/tmp"); // for config.h
- zig0.defineCMacro("ZIG_VERSION_MAJOR", b.fmt("{d}", .{zig_version.major}));
- zig0.defineCMacro("ZIG_VERSION_MINOR", b.fmt("{d}", .{zig_version.minor}));
- zig0.defineCMacro("ZIG_VERSION_PATCH", b.fmt("{d}", .{zig_version.patch}));
- zig0.defineCMacro("ZIG_VERSION_STRING", b.fmt("\"{s}\"", .{version}));
-
- for ([_]*std.build.LibExeObjStep{ zig0, exe }) |artifact| {
- artifact.addIncludeDir("src");
- artifact.addIncludeDir("deps/SoftFloat-3e/source/include");
- artifact.addIncludeDir("deps/SoftFloat-3e-prebuilt");
-
- artifact.defineCMacro("ZIG_LINK_MODE", "Static");
-
- artifact.addCSourceFiles(&stage1_sources, &exe_cflags);
- artifact.addCSourceFiles(&optimized_c_sources, &[_][]const u8{ "-std=c99", "-O3" });
-
- artifact.linkLibrary(softfloat);
- artifact.linkLibCpp();
- }
-
- try addStaticLlvmOptionsToExe(zig0);
-
- const zig1_obj_ext = target.getObjectFormat().fileExt(target.getCpuArch());
- const zig1_obj_path = b.pathJoin(&.{ "zig-cache", "tmp", b.fmt("zig1{s}", .{zig1_obj_ext}) });
- const zig1_compiler_rt_path = b.pathJoin(&.{ b.pathFromRoot("lib"), "std", "special", "compiler_rt.zig" });
-
- const zig1_obj = zig0.run();
- zig1_obj.addArgs(&.{
- "src/stage1.zig",
- "-target",
- try target.zigTriple(b.allocator),
- "-mcpu=baseline",
- "--name",
- "zig1",
- "--zig-lib-dir",
- b.pathFromRoot("lib"),
- b.fmt("-femit-bin={s}", .{b.pathFromRoot(zig1_obj_path)}),
- "-fcompiler-rt",
- "-lc",
- });
- {
- zig1_obj.addArgs(&.{ "--pkg-begin", "build_options" });
- zig1_obj.addFileSourceArg(exe_options.getSource());
- zig1_obj.addArgs(&.{ "--pkg-end", "--pkg-begin", "compiler_rt", zig1_compiler_rt_path, "--pkg-end" });
- }
- switch (mode) {
- .Debug => {},
- .ReleaseFast => {
- zig1_obj.addArg("-OReleaseFast");
- zig1_obj.addArg("--strip");
- },
- .ReleaseSafe => {
- zig1_obj.addArg("-OReleaseSafe");
- zig1_obj.addArg("--strip");
- },
- .ReleaseSmall => {
- zig1_obj.addArg("-OReleaseSmall");
- zig1_obj.addArg("--strip");
- },
- }
- if (single_threaded orelse false) {
- zig1_obj.addArg("-fsingle-threaded");
- }
-
- exe.step.dependOn(&zig1_obj.step);
- exe.addObjectFile(zig1_obj_path);
-
- // This is intentionally a dummy path. stage1.zig tries to @import("compiler_rt") in case
- // of being built by cmake. But when built by zig it's gonna get a compiler_rt so that
- // is pointless.
- exe.addPackagePath("compiler_rt", "src/empty.zig");
- }
- if (cmake_cfg) |cfg| {
- // Inside this code path, we have to coordinate with system packaged LLVM, Clang, and LLD.
- // That means we also have to rely on stage1 compiled c++ files. We parse config.h to find
- // the information passed on to us from cmake.
- if (cfg.cmake_prefix_path.len > 0) {
- b.addSearchPrefix(cfg.cmake_prefix_path);
- }
-
- try addCmakeCfgOptionsToExe(b, cfg, exe, use_zig_libcxx);
- } else {
- // Here we are -Denable-llvm but no cmake integration.
- try addStaticLlvmOptionsToExe(exe);
- }
- }
-
- const semver = try std.SemanticVersion.parse(version);
- exe_options.addOption(std.SemanticVersion, "semver", semver);
-
- exe_options.addOption(bool, "enable_logging", enable_logging);
- exe_options.addOption(bool, "enable_link_snapshots", enable_link_snapshots);
- exe_options.addOption(bool, "enable_tracy", tracy != null);
- exe_options.addOption(bool, "enable_tracy_callstack", tracy_callstack);
- exe_options.addOption(bool, "enable_tracy_allocation", tracy_allocation);
- exe_options.addOption(bool, "value_tracing", value_tracing);
- exe_options.addOption(bool, "is_stage1", is_stage1);
- exe_options.addOption(bool, "omit_stage2", omit_stage2);
- if (tracy) |tracy_path| {
- const client_cpp = fs.path.join(
- b.allocator,
- &[_][]const u8{ tracy_path, "TracyClient.cpp" },
- ) catch unreachable;
-
- // On mingw, we need to opt into windows 7+ to get some features required by tracy.
- const tracy_c_flags: []const []const u8 = if (target.isWindows() and target.getAbi() == .gnu)
- &[_][]const u8{ "-DTRACY_ENABLE=1", "-fno-sanitize=undefined", "-D_WIN32_WINNT=0x601" }
- else
- &[_][]const u8{ "-DTRACY_ENABLE=1", "-fno-sanitize=undefined" };
-
- exe.addIncludeDir(tracy_path);
- exe.addCSourceFile(client_cpp, tracy_c_flags);
- if (!enable_llvm) {
- exe.linkSystemLibraryName("c++");
- }
- exe.linkLibC();
-
- if (target.isWindows()) {
- exe.linkSystemLibrary("dbghelp");
- exe.linkSystemLibrary("ws2_32");
- }
- }
-}
-
-const exe_cflags = [_][]const u8{
- "-std=c++14",
- "-D__STDC_CONSTANT_MACROS",
- "-D__STDC_FORMAT_MACROS",
- "-D__STDC_LIMIT_MACROS",
- "-D_GNU_SOURCE",
- "-fvisibility-inlines-hidden",
- "-fno-exceptions",
- "-fno-rtti",
- "-Werror=type-limits",
- "-Wno-missing-braces",
- "-Wno-comment",
-};
-
-fn addCmakeCfgOptionsToExe(
- b: *Builder,
- cfg: CMakeConfig,
- exe: *std.build.LibExeObjStep,
- use_zig_libcxx: bool,
-) !void {
- exe.addObjectFile(fs.path.join(b.allocator, &[_][]const u8{
- cfg.cmake_binary_dir,
- "zigcpp",
- b.fmt("{s}{s}{s}", .{ exe.target.libPrefix(), "zigcpp", exe.target.staticLibSuffix() }),
- }) catch unreachable);
- assert(cfg.lld_include_dir.len != 0);
- exe.addIncludeDir(cfg.lld_include_dir);
- addCMakeLibraryList(exe, cfg.clang_libraries);
- addCMakeLibraryList(exe, cfg.lld_libraries);
- addCMakeLibraryList(exe, cfg.llvm_libraries);
-
- if (use_zig_libcxx) {
- exe.linkLibCpp();
- } else {
- const need_cpp_includes = true;
-
- // System -lc++ must be used because in this code path we are attempting to link
- // against system-provided LLVM, Clang, LLD.
- if (exe.target.getOsTag() == .linux) {
- // First we try to static link against gcc libstdc++. If that doesn't work,
- // we fall back to -lc++ and cross our fingers.
- addCxxKnownPath(b, cfg, exe, "libstdc++.a", "", need_cpp_includes) catch |err| switch (err) {
- error.RequiredLibraryNotFound => {
- exe.linkSystemLibrary("c++");
- },
- else => |e| return e,
- };
- exe.linkSystemLibrary("unwind");
- } else if (exe.target.isFreeBSD()) {
- try addCxxKnownPath(b, cfg, exe, "libc++.a", null, need_cpp_includes);
- exe.linkSystemLibrary("pthread");
- } else if (exe.target.getOsTag() == .openbsd) {
- try addCxxKnownPath(b, cfg, exe, "libc++.a", null, need_cpp_includes);
- try addCxxKnownPath(b, cfg, exe, "libc++abi.a", null, need_cpp_includes);
- } else if (exe.target.isDarwin()) {
- exe.linkSystemLibrary("c++");
- }
- }
-
- if (cfg.dia_guids_lib.len != 0) {
- exe.addObjectFile(cfg.dia_guids_lib);
- }
-}
-
-fn addStaticLlvmOptionsToExe(
- exe: *std.build.LibExeObjStep,
-) !void {
- // Adds the Zig C++ sources which both stage1 and stage2 need.
- //
- // We need this because otherwise zig_clang_cc1_main.cpp ends up pulling
-    // in a dependency on llvm::cfg::Update
diff --git a/doc/langref.html.in b/doc/langref.html.in
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
       User documentation that doesn't belong to whatever
-      immediately follows it, like container level documentation, goes
-      in top level doc comments. A top level doc comment is one that
+      immediately follows it, like container-level documentation, goes
+      in top-level doc comments. A top-level doc comment is one that
       begins with two slashes and an exclamation point:
       {#syntax#}//!{#endsyntax#}.
       i or u followed by digits. For example, the identifier
       {#syntax#}i7{#endsyntax#} refers to a signed 7-bit integer. The maximum allowed bit-width of an
-      integer type is {#syntax#}65535{#endsyntax#}.
+      integer type is {#syntax#}65535{#endsyntax#}. For signed integer types, Zig uses a
+      two's complement representation.
       {#see_also|Wrapping Operations#}
       {#header_close#}
@@ -2768,7 +2770,7 @@ test "comptime @intToPtr" {
}
}
{#code_end#}
- {#see_also|Optional Pointers|@intToPtr|@ptrToInt|C Pointers|Pointers to Zero Bit Types#}
+ {#see_also|Optional Pointers|@intToPtr|@ptrToInt|C Pointers#}
{#header_open|volatile#}
       Loads and stores are assumed to not have side effects. If a given load or store
       should have side effects, such as Memory Mapped Input/Output (MMIO), use
       {#syntax#}volatile{#endsyntax#}.
@@ -2862,19 +2864,22 @@ var foo: u8 align(4) = 100;
 test "global variable alignment" {
     try expect(@typeInfo(@TypeOf(&foo)).Pointer.alignment == 4);
     try expect(@TypeOf(&foo) == *align(4) u8);
-    const as_pointer_to_array: *[1]u8 = &foo;
-    const as_slice: []u8 = as_pointer_to_array;
-    try expect(@TypeOf(as_slice) == []align(4) u8);
+    const as_pointer_to_array: *align(4) [1]u8 = &foo;
+    const as_slice: []align(4) u8 = as_pointer_to_array;
+    const as_unaligned_slice: []u8 = as_slice;
+    try expect(as_unaligned_slice[0] == 100);
 }

-fn derp() align(@sizeOf(usize) * 2) i32 { return 1234; }
+fn derp() align(@sizeOf(usize) * 2) i32 {
+    return 1234;
+}
 fn noop1() align(1) void {}
 fn noop4() align(4) void {}

 test "function alignment" {
     try expect(derp() == 1234);
-    try expect(@TypeOf(noop1) == fn() align(1) void);
-    try expect(@TypeOf(noop4) == fn() align(4) void);
+    try expect(@TypeOf(noop1) == fn () align(1) void);
+    try expect(@TypeOf(noop4) == fn () align(4) void);
     noop1();
     noop4();
 }
@@ -3336,6 +3341,7 @@ fn doTheTest() !void {
       Zig allows the address to be taken of a non-byte-aligned field:
       {#code_begin|test|pointer_to_non-byte_aligned_field#}
+      {#backend_stage1#}
 const std = @import("std");
 const expect = std.testing.expect;
@@ -3391,7 +3397,8 @@ fn bar(x: *const u3) u3 {
       Pointers to non-ABI-aligned fields share the same address as the other fields
       within their host integer:
-      {#code_begin|test|pointer_to_non-bit_aligned_field#}
+      {#code_begin|test|packed_struct_field_addrs#}
+      {#backend_stage1#}
 const std = @import("std");
 const expect = std.testing.expect;
@@ -3407,7 +3414,7 @@ var bit_field = BitField{
     .c = 3,
 };

-test "pointer to non-bit-aligned field" {
+test "pointers of sub-byte-aligned fields share addresses" {
     try expect(@ptrToInt(&bit_field.a) == @ptrToInt(&bit_field.b));
     try expect(@ptrToInt(&bit_field.a) == @ptrToInt(&bit_field.c));
 }
@@ -3438,20 +3445,22 @@ test "pointer to non-bit-aligned field" {
 }
 {#code_end#}
-      Packed structs have 1-byte alignment. However if you have an overaligned pointer to a packed struct,
-      Zig should correctly understand the alignment of fields. However there is
-      a bug:
+      Packed structs have the same alignment as their backing integer, however, overaligned
+      pointers to packed structs can override this:
-      {#code_begin|test_err|expected type '*u32', found '*align(1) u32'#}
+      {#code_begin|test|overaligned_packed_struct#}
+const std = @import("std");
+const expect = std.testing.expect;
+
 const S = packed struct {
     a: u32,
     b: u32,
 };

 test "overaligned pointer to packed struct" {
-    var foo: S align(4) = undefined;
+    var foo: S align(4) = .{ .a = 1, .b = 2 };
     const ptr: *align(4) S = &foo;
     const ptr_to_b: *u32 = &ptr.b;
-    _ = ptr_to_b;
+    try expect(ptr_to_b.* == 2);
 }
 {#code_end#}
-      When this bug is fixed, the above test in the documentation will unexpectedly pass, which will
@@ -3698,7 +3707,7 @@ test "@tagName" {
By default, enums are not guaranteed to be compatible with the C ABI:
-      {#code_begin|obj_err|parameter of type 'Foo' not allowed in function with calling convention 'C'#}
+      {#code_begin|obj_err|parameter of type 'test.Foo' not allowed in function with calling convention 'C'#}
 const Foo = enum { a, b, c };
 export fn entry(foo: Foo) void { _ = foo; }
 {#code_end#}
@@ -4004,7 +4013,7 @@ fn makeNumber() Number {
       This is typically used for type safety when interacting with C code that does not
       expose struct details. Example:
-      {#code_begin|test_err|expected type '*Derp', found '*Wat'#}
+      {#code_begin|test_err|expected type '*test.Derp', found '*test.Wat'#}
 const Derp = opaque {};
 const Wat = opaque {};
@@ -4203,7 +4212,7 @@ test "switch on tagged union" {
       When a {#syntax#}switch{#endsyntax#} expression does not have an {#syntax#}else{#endsyntax#} clause,
       it must exhaustively list all the possible values. Failure to do so is a compile error:
-      {#code_begin|test_err|not handled in switch#}
+      {#code_begin|test_err|unhandled enumeration value#}
 const Color = enum {
     auto,
     off,
@@ -5015,8 +5024,8 @@ fn shiftLeftOne(a: u32) callconv(.Inline) u32 {
 // Another file can use @import and call sub2
 pub fn sub2(a: i8, b: i8) i8 { return a - b; }

-// Functions can be used as values and are equivalent to pointers.
-const call2_op = fn (a: i8, b: i8) i8;
+// Function pointers are prefixed with `*const `.
+const call2_op = *const fn (a: i8, b: i8) i8;
 fn do_op(fn_call: call2_op, op1: i8, op2: i8) i8 {
     return fn_call(op1, op2);
 }
@@ -5026,17 +5035,9 @@ test "function" {
     try expect(do_op(sub2, 5, 6) == -1);
 }
 {#code_end#}
-      Function values are like pointers:
-      {#code_begin|obj#}
-const assert = @import("std").debug.assert;
-
-comptime {
-    assert(@TypeOf(foo) == fn()void);
-    assert(@sizeOf(fn()void) == @sizeOf(?fn()void));
-}
-
-fn foo() void { }
-      {#code_end#}
+      There is a difference between a function body and a function pointer.
+      Function bodies are {#link|comptime#}-only types while function {#link|Pointers#} may be
+      runtime-known.
 {#header_open|Pass-by-value Parameters#}
       Primitive types such as {#link|Integers#} and {#link|Floats#} passed as parameters
@@ -6123,10 +6124,11 @@ test "float widening" {
       two choices about the coercion.
-      {#link|Zero Bit Types#} may be coerced to single-item {#link|Pointers#},
-      regardless of const.
-      TODO document the reasoning for this
-      TODO document whether vice versa should work and why
-      {#code_begin|test|coerce_zero_bit_types#}
-test "coercion of zero bit types" {
-    var x: void = {};
-    var y: *void = x;
-    _ = y;
-}
-      {#code_end#}
-      {#header_close#}
 {#header_open|Type Coercion: undefined#}
       {#link|undefined#} can be cast to any type.
 {#header_close#}
@@ -6467,7 +6456,6 @@ test "peer type resolution: *const T and ?*T" {
       These types can only ever have one possible value, and thus
@@ -6527,7 +6515,7 @@ test "turn HashMap into a set with void" {
Expressions of type {#syntax#}void{#endsyntax#} are the only ones whose value can be ignored. For example:
-      {#code_begin|test_err|expression value is ignored#}
+      {#code_begin|test_err|ignored#}
 test "ignoring expression value" {
     foo();
 }
@@ -6553,37 +6541,6 @@ fn foo() i32 {
 }
 {#code_end#}
 {#header_close#}
-
-      {#header_open|Pointers to Zero Bit Types#}
-      Pointers to zero bit types also have zero bits. They always compare equal to each other:
-      {#code_begin|test|pointers_to_zero_bits#}
-const std = @import("std");
-const expect = std.testing.expect;
-
-test "pointer to empty struct" {
-    const Empty = struct {};
-    var a = Empty{};
-    var b = Empty{};
-    var ptr_a = &a;
-    var ptr_b = &b;
-    comptime try expect(ptr_a == ptr_b);
-}
-      {#code_end#}
-      The type being pointed to can only ever be one value; therefore loads and stores are
-      never generated. {#link|ptrToInt#} and {#link|intToPtr#} are not allowed:
-      {#code_begin|test_err#}
-const Empty = struct {};
-
-test "@ptrToInt for pointer to zero bit type" {
-    var a = Empty{};
-    _ = @ptrToInt(&a);
-}
-
-test "@intToPtr for pointer to zero bit type" {
-    _ = @intToPtr(*Empty, 0x1);
-}
-      {#code_end#}
-      {#header_close#}
 {#header_close#}
 {#header_open|Result Location Semantics#}
@@ -6666,7 +6623,7 @@ fn gimmeTheBiggerInteger(a: u64, b: u64) u64 {
       For example, if we were to introduce another function to the above snippet:
-      {#code_begin|test_err|values of type 'type' must be comptime known#}
+      {#code_begin|test_err|unable to resolve comptime value#}
 fn max(comptime T: type, a: T, b: T) T {
     return if (a > b) a else b;
 }
@@ -6692,7 +6649,7 @@ fn foo(condition: bool) void {
       For example:
-      {#code_begin|test_err|operator not allowed for type 'bool'#}
+      {#code_begin|test_err|operator > not allowed for type 'bool'#}
 fn max(comptime T: type, a: T, b: T) T {
     return if (a > b) a else b;
 }
@@ -6837,7 +6794,7 @@ fn performFn(start_value: i32) i32 {
       use a {#syntax#}comptime{#endsyntax#} expression to guarantee that the expression will be
       evaluated at compile-time. If this cannot be accomplished, the compiler will emit an error. For example:
-      {#code_begin|test_err|unable to evaluate constant expression#}
+      {#code_begin|test_err|comptime call of extern function#}
 extern fn exit() noreturn;

 test "foo" {
@@ -6889,7 +6846,7 @@ test "fibonacci" {
       Imagine if we had forgotten the base case of the recursive function and tried to run the tests:
-      {#code_begin|test_err|operation caused overflow#}
+      {#code_begin|test_err|overflow of integer type#}
 const expect = @import("std").testing.expect;

 fn fibonacci(index: u32) u32 {
@@ -6913,7 +6870,8 @@ test "fibonacci" {
       But what would have happened if we used a signed integer?
       {#code_begin|test_err|evaluation exceeded 1000 backwards branches#}
-const expect = @import("std").testing.expect;
+      {#backend_stage1#}
+const assert = @import("std").debug.assert;

 fn fibonacci(index: i32) i32 {
     //if (index < 2) return index;
@@ -6922,7 +6880,7 @@ fn fibonacci(index: i32) i32 {
 test "fibonacci" {
     comptime {
-        try expect(fibonacci(7) == 13);
+        try assert(fibonacci(7) == 13);
     }
 }
 {#code_end#}
@@ -6935,8 +6893,8 @@ test "fibonacci" {
       What if we fix the base case, but put the wrong value in the {#syntax#}expect{#endsyntax#} line?
-      {#code_begin|test_err|test "fibonacci"... FAIL (TestUnexpectedResult)#}
-const expect = @import("std").testing.expect;
+      {#code_begin|test_err|reached unreachable#}
+const assert = @import("std").debug.assert;

 fn fibonacci(index: i32) i32 {
     if (index < 2) return index;
@@ -6945,16 +6903,10 @@ fn fibonacci(index: i32) i32 {
 test "fibonacci" {
     comptime {
-        try expect(fibonacci(7) == 99999);
+        try assert(fibonacci(7) == 99999);
     }
 }
 {#code_end#}
-
-      What happened is Zig started interpreting the {#syntax#}expect{#endsyntax#} function with the
-      parameter {#syntax#}ok{#endsyntax#} set to {#syntax#}false{#endsyntax#}. When the interpreter hit
-      {#syntax#}@panic{#endsyntax#} it emitted a compile error because a panic during compile
-      causes a compile error if it is detected at compile-time.
       At container level (outside of any function), all expressions are implicitly
@@ -7280,6 +7232,7 @@ pub fn main() void {
       {#code_begin|exe#}
       {#target_linux_x86_64#}
+      {#backend_stage1#}
 pub fn main() noreturn {
     const msg = "hello world\n";
     _ = syscall3(SYS_write, STDOUT_FILENO, @ptrToInt(msg), msg.len);
@@ -7497,6 +7450,7 @@ test "global assembly" {
       or resumer (in the case of subsequent suspensions).
       {#code_begin|test|suspend_no_resume#}
+      {#backend_stage1#}
 const std = @import("std");
 const expect = std.testing.expect;
@@ -7524,6 +7478,7 @@ fn func() void {
       {#link|@frame#} provides access to the async function frame pointer.
       {#code_begin|test|async_suspend_block#}
+      {#backend_stage1#}
 const std = @import("std");
 const expect = std.testing.expect;
@@ -7562,6 +7517,7 @@ fn testSuspendBlock() void {
       never returns to its resumer and continues executing.
       {#code_begin|test|resume_from_suspend#}
+      {#backend_stage1#}
 const std = @import("std");
 const expect = std.testing.expect;
@@ -7598,6 +7554,7 @@ fn testResumeFromSuspend(my_result: *i32) void {
       and the return value of the async function would be lost.
       {#code_begin|test|async_await#}
+      {#backend_stage1#}
 const std = @import("std");
 const expect = std.testing.expect;
@@ -7642,6 +7599,7 @@ fn func() void {
       return value directly from the target function's frame.
       {#code_begin|test|async_await_sequence#}
+      {#backend_stage1#}
 const std = @import("std");
 const expect = std.testing.expect;
@@ -7695,6 +7653,7 @@ fn seq(c: u8) void {
       {#syntax#}async{#endsyntax#}/{#syntax#}await{#endsyntax#} usage:
       {#code_begin|exe|async#}
+      {#backend_stage1#}
 const std = @import("std");
 const Allocator = std.mem.Allocator;
@@ -7773,6 +7732,7 @@ fn readFile(allocator: Allocator, filename: []const u8) ![]u8 {
       observe the same behavior, with one tiny difference:
       {#code_begin|exe|blocking#}
+      {#backend_stage1#}
 const std = @import("std");
 const Allocator = std.mem.Allocator;
@@ -7910,6 +7870,7 @@ comptime {
       {#syntax#}await{#endsyntax#} will copy the result from {#syntax#}result_ptr{#endsyntax#}.
       {#code_begin|test|async_struct_field_fn_pointer#}
+      {#backend_stage1#}
 const std = @import("std");
 const expect = std.testing.expect;
@@ -8071,8 +8032,8 @@ fn func(y: *i32) void {
 {#header_close#}
 {#header_open|@byteSwap#}
-{#syntax#}@byteSwap(comptime T: type, operand: T) T{#endsyntax#}
-      {#syntax#}T{#endsyntax#} must be an integer type with bit count evenly divisible by 8.
+{#syntax#}@byteSwap(operand: anytype) T{#endsyntax#}
+      {#syntax#}@TypeOf(operand){#endsyntax#} must be an integer type or an integer vector type with bit count evenly divisible by 8.
       {#syntax#}operand{#endsyntax#} may be an {#link|integer|Integers#} or {#link|vector|Vectors#}.
       Swaps the byte order of the integer. This converts a big endian integer to a little endian integer,
@@ -8089,8 +8050,8 @@ fn func(y: *i32) void {
 {#header_close#}
 {#header_open|@bitReverse#}
-{#syntax#}@bitReverse(comptime T: type, integer: T) T{#endsyntax#}
-      {#syntax#}T{#endsyntax#} accepts any integer type.
+{#syntax#}@bitReverse(integer: anytype) T{#endsyntax#}
+      {#syntax#}@TypeOf(anytype){#endsyntax#} accepts any integer type or integer vector type.
       Reverses the bitpattern of an integer value, including the sign bit if applicable.
@@ -8229,8 +8190,8 @@ pub const CallOptions = struct {
 {#header_close#}
 {#header_open|@clz#}
-{#syntax#}@clz(comptime T: type, operand: T){#endsyntax#}
-      {#syntax#}T{#endsyntax#} must be an integer type.
+{#syntax#}@clz(operand: anytype){#endsyntax#}
+      {#syntax#}@TypeOf(operand){#endsyntax#} must be an integer type or an integer vector type.
       {#syntax#}operand{#endsyntax#} may be an {#link|integer|Integers#} or {#link|vector|Vectors#}.
       This function counts the number of most-significant (leading in a big-Endian sense) zeroes in an integer.
@@ -8375,8 +8336,8 @@ test "main" {
 {#header_close#}
 {#header_open|@ctz#}
-{#syntax#}@ctz(comptime T: type, operand: T){#endsyntax#}
-      {#syntax#}T{#endsyntax#} must be an integer type.
+{#syntax#}@ctz(operand: anytype){#endsyntax#}
+      {#syntax#}@TypeOf(operand){#endsyntax#} must be an integer type or an integer vector type.
       {#syntax#}operand{#endsyntax#} may be an {#link|integer|Integers#} or {#link|vector|Vectors#}.
       This function counts the number of least-significant (trailing in a big-Endian sense) zeroes in an integer.
@@ -8677,6 +8638,7 @@ test "decl access by string" {
       allows one to, for example, heap-allocate an async function frame:
       {#code_begin|test|heap_allocated_frame#}
+      {#backend_stage1#}
 const std = @import("std");

 test "heap allocated frame" {
@@ -9011,8 +8973,8 @@ test "@wasmMemoryGrow" {
 {#header_close#}
 {#header_open|@popCount#}
-{#syntax#}@popCount(comptime T: type, operand: T){#endsyntax#}
-      {#syntax#}T{#endsyntax#} must be an integer type.
+{#syntax#}@popCount(operand: anytype){#endsyntax#}
+      {#syntax#}@TypeOf(operand){#endsyntax#} must be an integer type.
       {#syntax#}operand{#endsyntax#} may be an {#link|integer|Integers#} or {#link|vector|Vectors#}.
       Counts the number of bits set in an integer.
@@ -9423,12 +9385,6 @@ const std = @import("std");
 const expect = std.testing.expect;

 test "vector @reduce" {
-    // This test regressed with LLVM 14:
-    // https://github.com/llvm/llvm-project/issues/55522
-    // We'll skip this test unless the self-hosted compiler is being used.
-    // After LLVM 15 is released we can delete this line.
-    if (@import("builtin").zig_backend == .stage1) return;
-
     const value = @Vector(4, i32){ 1, -1, 1, -1 };
     const result = value > @splat(4, @as(i32, 0));
     // result is { true, false, true, false };
@@ -9938,7 +9894,7 @@ pub fn main() void {
 {#header_close#}
 {#header_open|Index out of Bounds#}
At compile-time:
-      {#code_begin|test_err|index 5 outside array of size 5#}
+      {#code_begin|test_err|index 5 outside array of length 5#}
 comptime {
     const array: [5]u8 = "hello".*;
     const garbage = array[5];
@@ -9959,9 +9915,9 @@ fn foo(x: []const u8) u8 {
 {#header_close#}
 {#header_open|Cast Negative Number to Unsigned Integer#}
       At compile-time:
-      {#code_begin|test_err|attempt to cast negative value to unsigned integer#}
+      {#code_begin|test_err|type 'u32' cannot represent integer value '-1'#}
 comptime {
-    const value: i32 = -1;
+    var value: i32 = -1;
     const unsigned = @intCast(u32, value);
     _ = unsigned;
 }
@@ -9982,7 +9938,7 @@ pub fn main() void {
 {#header_close#}
 {#header_open|Cast Truncates Data#}
       At compile-time:
-      {#code_begin|test_err|cast from 'u16' to 'u8' truncates bits#}
+      {#code_begin|test_err|type 'u8' cannot represent integer value '300'#}
 comptime {
     const spartan_count: u16 = 300;
     const byte = @intCast(u8, spartan_count);
@@ -10017,7 +9973,7 @@ pub fn main() void {
       Example with addition at compile-time:
-      {#code_begin|test_err|operation caused overflow#}
+      {#code_begin|test_err|overflow of integer type 'u8' with value '256'#}
 comptime {
     var byte: u8 = 255;
     byte += 1;
@@ -10118,6 +10074,7 @@ test "wraparound addition and subtraction" {
 {#header_open|Exact Left Shift Overflow#}
       At compile-time:
       {#code_begin|test_err|operation caused overflow#}
+      {#backend_stage1#}
 comptime {
     const x = @shlExact(@as(u8, 0b01010101), 2);
     _ = x;
@@ -10137,6 +10094,7 @@ pub fn main() void {
 {#header_open|Exact Right Shift Overflow#}
       At compile-time:
       {#code_begin|test_err|exact shift shifted out 1 bits#}
+      {#backend_stage1#}
 comptime {
     const x = @shrExact(@as(u8, 0b10101010), 2);
     _ = x;
@@ -10200,6 +10158,7 @@ pub fn main() void {
 {#header_open|Exact Division Remainder#}
       At compile-time:
       {#code_begin|test_err|exact division had a remainder#}
+      {#backend_stage1#}
 comptime {
     const a: u32 = 10;
     const b: u32 = 3;
@@ -10302,7 +10261,7 @@ fn getNumberOrFail() !i32 {
 {#header_close#}
 {#header_open|Invalid Error Code#}
       At compile-time:
-      {#code_begin|test_err|integer value 11 represents no error#}
+      {#code_begin|test_err|integer value '11' represents no error#}
 comptime {
     const err = error.AnError;
     const number = @errorToInt(err) + 10;
@@ -10324,7 +10283,7 @@ pub fn main() void {
 {#header_close#}
 {#header_open|Invalid Enum Cast#}
       At compile-time:
-      {#code_begin|test_err|has no tag matching integer value 3#}
+      {#code_begin|test_err|enum 'test.Foo' has no tag with value '3'#}
 const Foo = enum {
     a,
     b,
@@ -10356,7 +10315,7 @@ pub fn main() void {
 {#header_open|Invalid Error Set Cast#}
       At compile-time:
-      {#code_begin|test_err|error.B not a member of error set 'Set2'#}
+      {#code_begin|test_err|'error.B' not a member of error set 'error{A,C}'#}
 const Set1 = error{
     A,
     B,
@@ -10417,7 +10376,7 @@ fn foo(bytes: []u8) u32 {
 {#header_close#}
 {#header_open|Wrong Union Field Access#}
       At compile-time:
-      {#code_begin|test_err|accessing union field 'float' while field 'int' is set#}
+      {#code_begin|test_err|access of union field 'float' while field 'int' is active#}
 comptime {
     var f = Foo{ .int = 42 };
     f.float = 12.34;
@@ -10509,6 +10468,7 @@ fn bar(f: *Foo) void {
       At compile-time:
       {#code_begin|test_err|null pointer casted to type#}
+      {#backend_stage1#}
 comptime {
     const opt_ptr: ?*i32 = null;
     const ptr = @ptrCast(*i32, opt_ptr);
@@ -10551,7 +10511,8 @@ const expect = std.testing.expect;

 test "using an allocator" {
     var buffer: [100]u8 = undefined;
-    const allocator = std.heap.FixedBufferAllocator.init(&buffer).allocator();
+    var fba = std.heap.FixedBufferAllocator.init(&buffer);
+    const allocator = fba.allocator();
     const result = try concat(allocator, "foo", "bar");
     try expect(std.mem.eql(u8, "foobar", result));
 }
@@ -10647,7 +10608,7 @@ pub fn main() !void {
       String literals such as {#syntax#}"foo"{#endsyntax#} are in the global constant data section.
       This is why it is an error to pass a string literal to a mutable slice, like this:
-      {#code_begin|test_err|cannot cast pointer to array literal to slice type '[]u8'#}
+      {#code_begin|test_err|expected type '[]u8', found '*const [5:0]u8'#}
 fn foo(s: []u8) void {
     _ = s;
 }
@@ -11832,8 +11793,8 @@ fn readU32Be() u32 {}
       {#syntax#}anytype{#endsyntax#}
diff --git a/lib/docs/index.html b/lib/docs/index.html
--- a/lib/docs/index.html
+++ b/lib/docs/index.html
       Loading...
-      Press escape to exit search and then '?' to see more options.
+      Here are some things you can try:
+      Press ? to see keyboard shortcuts and Esc to return.
There are no doc comments for this declaration.
'; - } - domTldDocs.classList.remove("hidden"); - } - - - function typeIsErrSet(typeIndex) { - let typeObj = zigAnalysis.types[typeIndex]; - return typeObj.kind === typeKinds.ErrorSet; - } - - - function typeIsStructWithNoFields(typeIndex) { - let typeObj = zigAnalysis.types[typeIndex]; - if (typeObj.kind !== typeKinds.Struct) - return false; - return (typeObj).fields.length == 0; - } - - - function typeIsGenericFn(typeIndex) { - let typeObj = zigAnalysis.types[typeIndex]; - if (typeObj.kind !== typeKinds.Fn) { - return false; - } - return (typeObj).generic_ret != null; - } - - - function renderFn(fnDecl) { - if ("refPath" in fnDecl.value.expr) { - let last = fnDecl.value.expr.refPath.length - 1; - let lastExpr = fnDecl.value.expr.refPath[last]; - console.assert("declRef" in lastExpr); - fnDecl = zigAnalysis.decls[lastExpr.declRef]; - } - - let value = resolveValue(fnDecl.value); - console.assert("type" in value.expr); - let typeObj = (zigAnalysis.types[value.expr.type]); - - domFnProtoCode.innerHTML = exprName(value.expr, { - wantHtml: true, - wantLink: true, - fnDecl, - }); - - let docsSource = null; - let srcNode = zigAnalysis.astNodes[fnDecl.src]; - if (srcNode.docs != null) { - docsSource = srcNode.docs; - } - - renderFnParamDocs(fnDecl, typeObj); - - let retExpr = resolveValue({expr:typeObj.ret}).expr; - if ("type" in retExpr) { - let retIndex = retExpr.type; - let errSetTypeIndex = (null); - let retType = zigAnalysis.types[retIndex]; - if (retType.kind === typeKinds.ErrorSet) { - errSetTypeIndex = retIndex; - } else if (retType.kind === typeKinds.ErrorUnion) { - errSetTypeIndex = (retType).err.type; - } - if (errSetTypeIndex != null) { - let errSetType = (zigAnalysis.types[errSetTypeIndex]); - renderErrorSet(errSetType); - } - } - - let protoSrcIndex = fnDecl.src; - if (typeIsGenericFn(value.expr.type)) { - // does the generic_ret contain a container? - var resolvedGenericRet = resolveValue({expr: typeObj.generic_ret}); - - if ("call" in resolvedGenericRet.expr){ - let call = zigAnalysis.calls[resolvedGenericRet.expr.call]; - let resolvedFunc = resolveValue({expr: call.func}); - if (!("type" in resolvedFunc.expr)) return; - let callee = zigAnalysis.types[resolvedFunc.expr.type]; - if (!callee.generic_ret) return; - resolvedGenericRet = resolveValue({expr: callee.generic_ret}); - } - - // TODO: see if unwrapping the `as` here is a good idea or not. 
- if ("as" in resolvedGenericRet.expr) { - resolvedGenericRet = { - expr: zigAnalysis.exprs[resolvedGenericRet.expr.as.exprArg] - }; - } - - if (!("type" in resolvedGenericRet.expr)) return; - const genericType = zigAnalysis.types[resolvedGenericRet.expr.type]; - if (isContainerType(genericType)) { - renderContainer(genericType) - } - - - - - - // old code - // let instantiations = nodesToFnsMap[protoSrcIndex]; - // let calls = nodesToCallsMap[protoSrcIndex]; - // if (instantiations == null && calls == null) { - // domFnNoExamples.classList.remove("hidden"); - // } else if (calls != null) { - // // if (fnObj.combined === undefined) fnObj.combined = allCompTimeFnCallsResult(calls); - // if (fnObj.combined != null) renderContainer(fnObj.combined); - - // resizeDomList(domListFnExamples, calls.length, ''); - - // for (let callI = 0; callI < calls.length; callI += 1) { - // let liDom = domListFnExamples.children[callI]; - // liDom.innerHTML = getCallHtml(fnDecl, calls[callI]); - // } - - // domFnExamples.classList.remove("hidden"); - // } else if (instantiations != null) { - // // TODO - // } - } else { - - domFnExamples.classList.add("hidden"); - domFnNoExamples.classList.add("hidden"); - } - - let protoSrcNode = zigAnalysis.astNodes[protoSrcIndex]; - if (docsSource == null && protoSrcNode != null && protoSrcNode.docs != null) { - docsSource = protoSrcNode.docs; - } - if (docsSource != null) { - domTldDocs.innerHTML = markdown(docsSource); - domTldDocs.classList.remove("hidden"); - } - domFnProto.classList.remove("hidden"); - } - - - function renderFnParamDocs(fnDecl, typeObj) { - let docCount = 0; - - let fnNode = zigAnalysis.astNodes[fnDecl.src]; - let fields = (fnNode.fields); - let isVarArgs = fnNode.varArgs; - - for (let i = 0; i < fields.length; i += 1) { - let field = fields[i]; - let fieldNode = zigAnalysis.astNodes[field]; - if (fieldNode.docs != null) { - docCount += 1; - } - } - if (docCount == 0) { - return; - } - - resizeDomList(domListParams, docCount, ''); - let domIndex = 0; - - for (let i = 0; i < fields.length; i += 1) { - let field = fields[i]; - let fieldNode = zigAnalysis.astNodes[field]; - let docs = fieldNode.docs; - if (fieldNode.docs == null) { - continue; - } - let docsNonEmpty = docs !== ""; - let divDom = domListParams.children[domIndex]; - domIndex += 1; - - - let value = typeObj.params[i]; - let preClass = docsNonEmpty ? ' class="fieldHasDocs"' : ""; - let html = '' + escapeHtml((fieldNode.name)) + ": ";
- if (isVarArgs && i === typeObj.params.length - 1) {
- html += '...';
- } else {
- let name = exprName(value, {wantHtml: false, wantLink: false});
- html += '' + name + '';
- }
-
- html += ',
';
-
- if (docsNonEmpty) {
- html += 'There are no doc comments for this declaration.
"; + } + domTldDocs.classList.remove("hidden"); + } + + function typeIsErrSet(typeIndex) { + let typeObj = zigAnalysis.types[typeIndex]; + return typeObj.kind === typeKinds.ErrorSet; + } + + function typeIsStructWithNoFields(typeIndex) { + let typeObj = zigAnalysis.types[typeIndex]; + if (typeObj.kind !== typeKinds.Struct) return false; + return typeObj.fields.length == 0; + } + + function typeIsGenericFn(typeIndex) { + let typeObj = zigAnalysis.types[typeIndex]; + if (typeObj.kind !== typeKinds.Fn) { + return false; + } + return typeObj.generic_ret != null; + } + + function renderFn(fnDecl) { + if ("refPath" in fnDecl.value.expr) { + let last = fnDecl.value.expr.refPath.length - 1; + let lastExpr = fnDecl.value.expr.refPath[last]; + console.assert("declRef" in lastExpr); + fnDecl = zigAnalysis.decls[lastExpr.declRef]; + } + + let value = resolveValue(fnDecl.value); + console.assert("type" in value.expr); + let typeObj = zigAnalysis.types[value.expr.type]; + + domFnProtoCode.innerHTML = exprName(value.expr, { + wantHtml: true, + wantLink: true, + fnDecl, + }); + + let docsSource = null; + let srcNode = zigAnalysis.astNodes[fnDecl.src]; + if (srcNode.docs != null) { + docsSource = srcNode.docs; + } + + renderFnParamDocs(fnDecl, typeObj); + + let retExpr = resolveValue({ expr: typeObj.ret }).expr; + if ("type" in retExpr) { + let retIndex = retExpr.type; + let errSetTypeIndex = null; + let retType = zigAnalysis.types[retIndex]; + if (retType.kind === typeKinds.ErrorSet) { + errSetTypeIndex = retIndex; + } else if (retType.kind === typeKinds.ErrorUnion) { + errSetTypeIndex = retType.err.type; + } + if (errSetTypeIndex != null) { + let errSetType = zigAnalysis.types[errSetTypeIndex]; + renderErrorSet(errSetType); + } + } + + let protoSrcIndex = fnDecl.src; + if (typeIsGenericFn(value.expr.type)) { + // does the generic_ret contain a container? + var resolvedGenericRet = resolveValue({ expr: typeObj.generic_ret }); + + if ("call" in resolvedGenericRet.expr) { + let call = zigAnalysis.calls[resolvedGenericRet.expr.call]; + let resolvedFunc = resolveValue({ expr: call.func }); + if (!("type" in resolvedFunc.expr)) return; + let callee = zigAnalysis.types[resolvedFunc.expr.type]; + if (!callee.generic_ret) return; + resolvedGenericRet = resolveValue({ expr: callee.generic_ret }); + } + + // TODO: see if unwrapping the `as` here is a good idea or not. 
+ if ("as" in resolvedGenericRet.expr) { + resolvedGenericRet = { + expr: zigAnalysis.exprs[resolvedGenericRet.expr.as.exprArg], + }; + } + + if (!("type" in resolvedGenericRet.expr)) return; + const genericType = zigAnalysis.types[resolvedGenericRet.expr.type]; + if (isContainerType(genericType)) { + renderContainer(genericType); + } + + // old code + // let instantiations = nodesToFnsMap[protoSrcIndex]; + // let calls = nodesToCallsMap[protoSrcIndex]; + // if (instantiations == null && calls == null) { + // domFnNoExamples.classList.remove("hidden"); + // } else if (calls != null) { + // // if (fnObj.combined === undefined) fnObj.combined = allCompTimeFnCallsResult(calls); + // if (fnObj.combined != null) renderContainer(fnObj.combined); + + // resizeDomList(domListFnExamples, calls.length, ''); + + // for (let callI = 0; callI < calls.length; callI += 1) { + // let liDom = domListFnExamples.children[callI]; + // liDom.innerHTML = getCallHtml(fnDecl, calls[callI]); + // } + + // domFnExamples.classList.remove("hidden"); + // } else if (instantiations != null) { + // // TODO + // } + } else { + domFnExamples.classList.add("hidden"); + domFnNoExamples.classList.add("hidden"); + } + + let protoSrcNode = zigAnalysis.astNodes[protoSrcIndex]; + if ( + docsSource == null && + protoSrcNode != null && + protoSrcNode.docs != null + ) { + docsSource = protoSrcNode.docs; + } + if (docsSource != null) { + domTldDocs.innerHTML = markdown(docsSource); + domTldDocs.classList.remove("hidden"); + } + domFnProto.classList.remove("hidden"); + } + + function renderFnParamDocs(fnDecl, typeObj) { + let docCount = 0; + + let fnNode = zigAnalysis.astNodes[fnDecl.src]; + let fields = fnNode.fields; + let isVarArgs = fnNode.varArgs; + + for (let i = 0; i < fields.length; i += 1) { + let field = fields[i]; + let fieldNode = zigAnalysis.astNodes[field]; + if (fieldNode.docs != null) { + docCount += 1; + } + } + if (docCount == 0) { + return; + } + + resizeDomList(domListParams, docCount, ""); + let domIndex = 0; + + for (let i = 0; i < fields.length; i += 1) { + let field = fields[i]; + let fieldNode = zigAnalysis.astNodes[field]; + let docs = fieldNode.docs; + if (fieldNode.docs == null) { + continue; + } + let docsNonEmpty = docs !== ""; + let divDom = domListParams.children[domIndex]; + domIndex += 1; + + let value = typeObj.params[i]; + let preClass = docsNonEmpty ? ' class="fieldHasDocs"' : ""; + let html = "" + escapeHtml(fieldNode.name) + ": ";
+ if (isVarArgs && i === typeObj.params.length - 1) {
+ html += "...";
+ } else {
+ let name = exprName(value, { wantHtml: false, wantLink: false });
+ html += '' + name + "";
+ }
+
+ html += ",
";
+
+ if (docsNonEmpty) {
+ html += '<div class="fieldDocs">' + markdown(docs) + "</div>";
+ }
- html += '<pre>' + escapeHtml(fieldName);
-
- if (container.kind === typeKinds.Enum) {
- html += ' = ' + fieldName + '';
- } else {
- let fieldTypeExpr = container.fields[i];
- html += ": ";
- let name = exprName(fieldTypeExpr, false, false);
- html += '<span>'+ name +'</span>';
- let tsn = typeShorthandName(fieldTypeExpr);
- if (tsn) {
- html += ' ('+ tsn +')';
-
- }
- }
-
- html += ',</pre>';
+ html +=
+ '<pre>' +
+ escapeHtml(fieldName);
+
+ if (container.kind === typeKinds.Enum) {
+ html += ' = ' + fieldName + "";
+ } else {
+ let fieldTypeExpr = container.fields[i];
+ html += ": ";
+ let name = exprName(fieldTypeExpr, false, false);
+ html += '<span>' + name + "</span>";
+ let tsn = typeShorthandName(fieldTypeExpr);
+ if (tsn) {
+ html += " (" + tsn + ")";
+ }
+ }
- // PHASE 2:
- // Render HTML from markdown lines.
- // Look at each line and emit fitting HTML code
+ html += ",</pre>";
- // [{INLINE}]({URL}) : <a href="{URL}">{INLINE}</a>
- // ![{TEXT}]({URL}) : <img src="{URL}" alt="{TEXT}">
- // [[std;format.fmt]] : (inner link)
+ if (varsList.length !== 0) {
+ resizeDomList(
+ domListGlobalVars,
+ varsList.length,
+ '<tr><td><a href="#"></a></td><td></td><td></td></tr>'
+ );
+ for (let i = 0; i < varsList.length; i += 1) {
+ let decl = varsList[i];
+ let trDom = domListGlobalVars.children[i];
-
-
- const formats = [
- {
- marker: "**",
- tag: "strong",
- },
- {
- marker: "~~",
- tag: "s",
- },
- {
- marker: "__",
- tag: "u",
- },
- {
- marker: "*",
- tag: "em",
- }
- ];
+ let tdName = trDom.children[0];
+ let tdNameA = tdName.children[0];
+ let tdType = trDom.children[1];
+ let tdDesc = trDom.children[2];
-
- const stack = [];
+ tdNameA.setAttribute("href", navLinkDecl(decl.name));
+ tdNameA.textContent = decl.name;
- let innerHTML = "";
- let currentRun = "";
+ tdType.innerHTML = typeValueName(typeOfDecl(decl), true, true);
- function flushRun() {
- if (currentRun != "") {
- innerHTML += escapeHtml(currentRun);
- }
- currentRun = "";
+ let docs = zigAnalysis.astNodes[decl.src].docs;
+ if (docs != null) {
+ tdDesc.innerHTML = shortDescMarkdown(docs);
+ } else {
+ tdDesc.textContent = "";
+ }
+ }
+ domSectGlobalVars.classList.remove("hidden");
+ }
+
+ if (valsList.length !== 0) {
+ resizeDomList(
+ domListValues,
+ valsList.length,
+ '<tr><td><a href="#"></a></td><td></td><td></td></tr>'
+ );
+ for (let i = 0; i < valsList.length; i += 1) {
+ let decl = valsList[i];
+ let trDom = domListValues.children[i];
+
+ let tdName = trDom.children[0];
+ let tdNameA = tdName.children[0];
+ let tdType = trDom.children[1];
+ let tdDesc = trDom.children[2];
+
+ tdNameA.setAttribute("href", navLinkDecl(decl.name));
+ tdNameA.textContent = decl.name;
+
+ tdType.innerHTML = exprName(walkResultTypeRef(decl.value), {
+ wantHtml: true,
+ wantLink: true,
+ });
+
+ let docs = zigAnalysis.astNodes[decl.src].docs;
+ if (docs != null) {
+ tdDesc.innerHTML = shortDescMarkdown(docs);
+ } else {
+ tdDesc.textContent = "";
+ }
+ }
+ domSectValues.classList.remove("hidden");
+ }
+
+ if (testsList.length !== 0) {
+ resizeDomList(
+ domListTests,
+ testsList.length,
+ '<tr><td><a href="#"></a></td><td></td><td></td></tr>'
+ );
+ for (let i = 0; i < testsList.length; i += 1) {
+ let decl = testsList[i];
+ let trDom = domListTests.children[i];
+
+ let tdName = trDom.children[0];
+ let tdNameA = tdName.children[0];
+ let tdType = trDom.children[1];
+ let tdDesc = trDom.children[2];
+
+ tdNameA.setAttribute("href", navLinkDecl(decl.name));
+ tdNameA.textContent = decl.name;
+
+ tdType.innerHTML = exprName(walkResultTypeRef(decl.value), {
+ wantHtml: true,
+ wantLink: true,
+ });
+
+ let docs = zigAnalysis.astNodes[decl.src].docs;
+ if (docs != null) {
+ tdDesc.innerHTML = shortDescMarkdown(docs);
+ } else {
+ tdDesc.textContent = "";
+ }
+ }
+ domSectTests.classList.remove("hidden");
+ }
+ }
+
+ function operatorCompare(a, b) {
+ if (a === b) {
+ return 0;
+ } else if (a < b) {
+ return -1;
+ } else {
+ return 1;
+ }
+ }
+
+ function detectRootIsStd() {
+ let rootPkg = zigAnalysis.packages[zigAnalysis.rootPkg];
+ if (rootPkg.table["std"] == null) {
+ // no std mapped into the root package
+ return false;
+ }
+ let stdPkg = zigAnalysis.packages[rootPkg.table["std"]];
+ if (stdPkg == null) return false;
+ return rootPkg.file === stdPkg.file;
+ }
+
+ function indexTypeKinds() {
+ let map = {};
+ for (let i = 0; i < zigAnalysis.typeKinds.length; i += 1) {
+ map[zigAnalysis.typeKinds[i]] = i;
+ }
+ // This is just for debugging purposes, not needed to function
+ let assertList = [
+ "Type",
+ "Void",
+ "Bool",
+ "NoReturn",
+ "Int",
+ "Float",
+ "Pointer",
+ "Array",
+ "Struct",
+ "ComptimeFloat",
+ "ComptimeInt",
+ "Undefined",
+ "Null",
+ "Optional",
+ "ErrorUnion",
+ "ErrorSet",
+ "Enum",
+ "Union",
+ "Fn",
+ "BoundFn",
+ "Opaque",
+ "Frame",
+ "AnyFrame",
+ "Vector",
+ "EnumLiteral",
+ ];
+ for (let i = 0; i < assertList.length; i += 1) {
+ if (map[assertList[i]] == null)
+ throw new Error("No type kind '" + assertList[i] + "' found");
+ }
+ return map;
+ }
+
+ function findTypeTypeId() {
+ for (let i = 0; i < zigAnalysis.types.length; i += 1) {
+ if (zigAnalysis.types[i].kind == typeKinds.Type) {
+ return i;
+ }
+ }
+ throw new Error("No type 'type' found");
+ }
+
+ function updateCurNav() {
+ curNav = {
+ showPrivDecls: false,
+ pkgNames: [],
+ pkgObjs: [],
+ declNames: [],
+ declObjs: [],
+ callName: null,
+ };
+ curNavSearch = "";
+
+ if (location.hash[0] === "#" && location.hash.length > 1) {
+ let query = location.hash.substring(1);
+ if (query[0] === "*") {
+ curNav.showPrivDecls = true;
+ query = query.substring(1);
+ }
+
+ let qpos = query.indexOf("?");
+ let nonSearchPart;
+ if (qpos === -1) {
+ nonSearchPart = query;
+ } else {
+ nonSearchPart = query.substring(0, qpos);
+ curNavSearch = decodeURIComponent(query.substring(qpos + 1));
+ }
+
+ let parts = nonSearchPart.split(";");
+ curNav.pkgNames = decodeURIComponent(parts[0]).split(".");
+ if (parts[1] != null) {
+ curNav.declNames = decodeURIComponent(parts[1]).split(".");
+ }
+ }
+ }
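
For orientation, here is how updateCurNav() decomposes a hypothetical location hash; the package and decl names below are illustrative, not taken from this patch:

    // location.hash = "#*std;fs.File?open"
    // leading "*"   -> curNav.showPrivDecls = true
    // "std"         -> curNav.pkgNames  = ["std"]
    // "fs.File"     -> curNav.declNames = ["fs", "File"]
    // after the "?" -> curNavSearch     = "open" (percent-decoded)
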
+
+ function onHashChange() {
+ updateCurNav();
+ if (domSearch.value !== curNavSearch) {
+ domSearch.value = curNavSearch;
+ if (domSearch.value.length == 0)
+ domSearchPlaceholder.classList.remove("hidden");
+ else
+ domSearchPlaceholder.classList.add("hidden");
+ }
+ render();
+ if (imFeelingLucky) {
+ imFeelingLucky = false;
+ activateSelectedResult();
+ }
+ }
+
+ function findSubDecl(parentType, childName) {
+ {
+ // Generic functions
+ if ("value" in parentType) {
+ const rv = resolveValue(parentType.value);
+ if ("type" in rv.expr) {
+ const t = zigAnalysis.types[rv.expr.type];
+ if (t.kind == typeKinds.Fn && t.generic_ret != null) {
+ const rgr = resolveValue({ expr: t.generic_ret });
+ if ("type" in rgr.expr) {
+ parentType = zigAnalysis.types[rgr.expr.type];
}
+ }
+ }
+ }
+ }
- let parsing_code = false;
- let codetag = "";
- let in_code = false;
+ if (!parentType.pubDecls) return null;
+ for (let i = 0; i < parentType.pubDecls.length; i += 1) {
+ let declIndex = parentType.pubDecls[i];
+ let childDecl = zigAnalysis.decls[declIndex];
+ if (childDecl.name === childName) {
+ return childDecl;
+ }
+ }
+ if (!parentType.privDecls) return null;
+ for (let i = 0; i < parentType.privDecls.length; i += 1) {
+ let declIndex = parentType.privDecls[i];
+ let childDecl = zigAnalysis.decls[declIndex];
+ if (childDecl.name === childName) {
+ return childDecl;
+ }
+ }
+ return null;
+ }
- for (let i = 0; i < innerText.length; i++) {
+ function computeCanonicalPackagePaths() {
+ let list = new Array(zigAnalysis.packages.length);
+ // Now we try to find all the packages from root.
+ let rootPkg = zigAnalysis.packages[zigAnalysis.rootPkg];
+ // Breadth-first to keep the path shortest possible.
+ let stack = [
+ {
+ path: [],
+ pkg: rootPkg,
+ },
+ ];
+ while (stack.length !== 0) {
+ let item = stack.shift();
+ for (let key in item.pkg.table) {
+ let childPkgIndex = item.pkg.table[key];
+ if (list[childPkgIndex] != null) continue;
+ let childPkg = zigAnalysis.packages[childPkgIndex];
+ if (childPkg == null) continue;
- if (parsing_code && in_code) {
- if (innerText.substr(i, codetag.length) == codetag) {
- // remove leading and trailing whitespace if string both starts and ends with one.
- if (currentRun[0] == " " && currentRun[currentRun.length - 1] == " ") {
- currentRun = currentRun.substr(1, currentRun.length - 2);
- }
- flushRun();
- i += codetag.length - 1;
- in_code = false;
- parsing_code = false;
- innerHTML += "
";
- codetag = "";
- } else {
- currentRun += innerText[i];
- }
- continue;
+ let newPath = item.path.concat([key]);
+ list[childPkgIndex] = newPath;
+ stack.push({
+ path: newPath,
+ pkg: childPkg,
+ });
+ }
+ }
+ return list;
+ }
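
Because the walk above is breadth-first, a package reachable both directly from the root and through an intermediate package is recorded under its shortest path. A small sketch with made-up package indices:

    // root.table = { std: 1, foo: 2 } and foo.table = { std: 1 }
    // computeCanonicalPackagePaths() yields:
    //   list[1] = ["std"]  (not ["foo", "std"]; the root's own edge wins)
    //   list[2] = ["foo"]
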
+
+ function computeCanonDeclPaths() {
+ let list = new Array(zigAnalysis.decls.length);
+ canonTypeDecls = new Array(zigAnalysis.types.length);
+
+ for (let pkgI = 0; pkgI < zigAnalysis.packages.length; pkgI += 1) {
+ if (pkgI === zigAnalysis.rootPkg && rootIsStd) continue;
+ let pkg = zigAnalysis.packages[pkgI];
+ let pkgNames = canonPkgPaths[pkgI];
+ if (pkgNames === undefined) continue;
+
+ let stack = [
+ {
+ declNames: [],
+ type: zigAnalysis.types[pkg.main],
+ },
+ ];
+ while (stack.length !== 0) {
+ let item = stack.shift();
+
+ if (isContainerType(item.type)) {
+ let t = item.type;
+
+ let len = t.pubDecls ? t.pubDecls.length : 0;
+ for (let declI = 0; declI < len; declI += 1) {
+ let mainDeclIndex = t.pubDecls[declI];
+ if (list[mainDeclIndex] != null) continue;
+
+ let decl = zigAnalysis.decls[mainDeclIndex];
+ let declVal = resolveValue(decl.value);
+ let declNames = item.declNames.concat([decl.name]);
+ list[mainDeclIndex] = {
+ pkgNames: pkgNames,
+ declNames: declNames,
+ };
+ if ("type" in declVal.expr) {
+ let value = zigAnalysis.types[declVal.expr.type];
+ if (declCanRepresentTypeKind(value.kind)) {
+ canonTypeDecls[declVal.type] = mainDeclIndex;
+ }
+
+ if (isContainerType(value)) {
+ stack.push({
+ declNames: declNames,
+ type: value,
+ });
+ }
+
+ // Generic function
+ if (value.kind == typeKinds.Fn && value.generic_ret != null) {
+ let resolvedVal = resolveValue({ expr: value.generic_ret });
+ if ("type" in resolvedVal.expr) {
+ let generic_type = zigAnalysis.types[resolvedVal.expr.type];
+ if (isContainerType(generic_type)) {
+ stack.push({
+ declNames: declNames,
+ type: generic_type,
+ });
+ }
}
+ }
+ }
+ }
+ }
+ }
+ }
+ return list;
+ }
- if (innerText[i] == "`") {
- flushRun();
- if (!parsing_code) {
- innerHTML += "";
- }
- parsing_code = true;
- codetag += "`";
- continue;
- }
+ function getCanonDeclPath(index) {
+ if (canonDeclPaths == null) {
+ canonDeclPaths = computeCanonDeclPaths();
+ }
+ //let cd = (canonDeclPaths);
+ return canonDeclPaths[index];
+ }
- if (parsing_code) {
- currentRun += innerText[i];
- in_code = true;
- } else {
- let any = false;
- for (let idx = (stack.length > 0 ? -1 : 0); idx < formats.length; idx++) {
- const fmt = idx >= 0 ? formats[idx] : stack[stack.length - 1];
- if (innerText.substr(i, fmt.marker.length) == fmt.marker) {
- flushRun();
- if (stack[stack.length - 1] == fmt) {
- stack.pop();
- innerHTML += "" + fmt.tag + ">";
- } else {
- stack.push(fmt);
- innerHTML += "<" + fmt.tag + ">";
- }
- i += fmt.marker.length - 1;
- any = true;
- break;
- }
- }
- if (!any) {
- currentRun += innerText[i];
- }
- }
+ function getCanonTypeDecl(index) {
+ getCanonDeclPath(0);
+ //let ct = (canonTypeDecls);
+ return canonTypeDecls[index];
+ }
+
+ function escapeHtml(text) {
+ return text.replace(/[&"<>]/g, function (m) {
+ return escapeHtmlReplacements[m];
+ });
+ }
+
+ function shortDescMarkdown(docs) {
+ const trimmed_docs = docs.trim();
+ let index = trimmed_docs.indexOf("\n\n");
+ let cut = false;
+
+ if (index < 0 || index > 80) {
+ if (trimmed_docs.length > 80) {
+ index = 80;
+ cut = true;
+ } else {
+ index = trimmed_docs.length;
+ }
+ }
+
+ let slice = trimmed_docs.slice(0, index);
+ if (cut) slice += "...";
+ return markdown(slice);
+ }
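
A quick sketch of the two truncation cases handled above (the inputs are made up):

    shortDescMarkdown("First line.\n\nMore detail here.");
    // -> markdown("First line.")            (cut at the first blank line)
    shortDescMarkdown("y".repeat(100));
    // -> markdown("y".repeat(80) + "...")   (hard cut at 80 chars, ellipsis appended)
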
+
+ function markdown(input) {
+ const raw_lines = input.split("\n"); // zig allows no '\r', so we don't need to split on CR
+
+ const lines = [];
+
+ // PHASE 1:
+ // Dissect lines and determine the type for each line.
+ // Also computes indentation level and removes unnecessary whitespace
+
+ let is_reading_code = false;
+ let code_indent = 0;
+ for (let line_no = 0; line_no < raw_lines.length; line_no++) {
+ const raw_line = raw_lines[line_no];
+
+ const line = {
+ indent: 0,
+ raw_text: raw_line,
+ text: raw_line.trim(),
+ type: "p", // p, h1 … h6, code, ul, ol, blockquote, skip, empty
+ ordered_number: -1, // NOTE: hack to make the type checker happy
+ };
+
+ if (!is_reading_code) {
+ while (
+ line.indent < line.raw_text.length &&
+ line.raw_text[line.indent] == " "
+ ) {
+ line.indent += 1;
+ }
+
+ if (line.text.startsWith("######")) {
+ line.type = "h6";
+ line.text = line.text.substr(6);
+ } else if (line.text.startsWith("#####")) {
+ line.type = "h5";
+ line.text = line.text.substr(5);
+ } else if (line.text.startsWith("####")) {
+ line.type = "h4";
+ line.text = line.text.substr(4);
+ } else if (line.text.startsWith("###")) {
+ line.type = "h3";
+ line.text = line.text.substr(3);
+ } else if (line.text.startsWith("##")) {
+ line.type = "h2";
+ line.text = line.text.substr(2);
+ } else if (line.text.startsWith("#")) {
+ line.type = "h1";
+ line.text = line.text.substr(1);
+ } else if (line.text.startsWith("-")) {
+ line.type = "ul";
+ line.text = line.text.substr(1);
+ } else if (line.text.match(/^\d+\..*$/)) {
+ // if line starts with {number}{dot}
+ const match = line.text.match(/(\d+)\./);
+ line.type = "ul";
+ line.text = line.text.substr(match[0].length);
+ line.ordered_number = Number(match[1]); // the item's number itself, not its digit count
+ } else if (line.text == "```") {
+ line.type = "skip";
+ is_reading_code = true;
+ code_indent = line.indent;
+ } else if (line.text == "") {
+ line.type = "empty";
+ }
+ } else {
+ if (line.text == "```") {
+ is_reading_code = false;
+ line.type = "skip";
+ } else {
+ line.type = "code";
+ line.text = line.raw_text.substr(code_indent); // strip the fence's indentation from every line of the code block
+ }
+ }
+
+ if (line.type != "skip") {
+ lines.push(line);
+ }
+ }
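
By the end of this phase every surviving line carries a type tag; a couple of hypothetical classifications:

    // "  ## Usage" -> { indent: 2, type: "h2", text: " Usage" }
    // "- item"     -> { type: "ul", text: " item" }
    // "```"        -> type "skip" (dropped), toggling is_reading_code
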
+
+ // PHASE 2:
+ // Render HTML from markdown lines.
+ // Look at each line and emit fitting HTML code
+
+ function markdownInlines(innerText) {
+ // inline types:
+ // **{INLINE}** : <strong>{INLINE}</strong>
+ // __{INLINE}__ : <u>{INLINE}</u>
+ // ~~{INLINE}~~ : <s>{INLINE}</s>
+ // *{INLINE}* : <em>{INLINE}</em>
+ // _{INLINE}_ : <em>{INLINE}</em>
+ // `{TEXT}` : <code>{TEXT}</code>
+ // [{INLINE}]({URL}) : <a href="{URL}">{INLINE}</a>
+ // ![{TEXT}]({URL}) : <img src="{URL}" alt="{TEXT}">
+ // [[std;format.fmt]] : (inner link)
+
+ const formats = [
+ {
+ marker: "**",
+ tag: "strong",
+ },
+ {
+ marker: "~~",
+ tag: "s",
+ },
+ {
+ marker: "__",
+ tag: "u",
+ },
+ {
+ marker: "*",
+ tag: "em",
+ },
+ ];
+
+ const stack = [];
+
+ let innerHTML = "";
+ let currentRun = "";
+
+ function flushRun() {
+ if (currentRun != "") {
+ innerHTML += escapeHtml(currentRun);
+ }
+ currentRun = "";
+ }
+
+ let parsing_code = false;
+ let codetag = "";
+ let in_code = false;
+
+ for (let i = 0; i < innerText.length; i++) {
+ if (parsing_code && in_code) {
+ if (innerText.substr(i, codetag.length) == codetag) {
+ // remove leading and trailing whitespace if string both starts and ends with one.
+ if (
+ currentRun[0] == " " &&
+ currentRun[currentRun.length - 1] == " "
+ ) {
+ currentRun = currentRun.substr(1, currentRun.length - 2);
}
flushRun();
-
- while (stack.length > 0) {
- const fmt = (stack.pop());
- innerHTML += "" + fmt.tag + ">";
- }
-
- return innerHTML;
+ i += codetag.length - 1;
+ in_code = false;
+ parsing_code = false;
+ innerHTML += "
";
+ codetag = "";
+ } else {
+ currentRun += innerText[i];
+ }
+ continue;
}
-
- function previousLineIs(type, line_no) {
- if (line_no > 0) {
- return (lines[line_no - 1].type == type);
- } else {
- return false;
- }
+ if (innerText[i] == "`") {
+ flushRun();
+ if (!parsing_code) {
+ innerHTML += "";
+ }
+ parsing_code = true;
+ codetag += "`";
+ continue;
}
-
- function nextLineIs(type, line_no) {
- if (line_no < (lines.length - 1)) {
- return (lines[line_no + 1].type == type);
- } else {
- return false;
- }
- }
-
-
- function getPreviousLineIndent(line_no) {
- if (line_no > 0) {
- return lines[line_no - 1].indent;
- } else {
- return 0;
- }
- }
-
-
- function getNextLineIndent(line_no) {
- if (line_no < (lines.length - 1)) {
- return lines[line_no + 1].indent;
- } else {
- return 0;
- }
- }
-
- let html = "";
- for (let line_no = 0; line_no < lines.length; line_no++) {
- const line = lines[line_no];
-
-
-
- switch (line.type) {
- case "h1":
- case "h2":
- case "h3":
- case "h4":
- case "h5":
- case "h6":
- html += "<" + line.type + ">" + markdownInlines(line.text) + "" + line.type + ">\n";
- break;
-
- case "ul":
- case "ol":
- if (!previousLineIs("ul", line_no) || getPreviousLineIndent(line_no) < line.indent) {
- html += "<" + line.type + ">\n";
- }
-
- html += "" + markdownInlines(line.text) + " \n";
-
- if (!nextLineIs("ul", line_no) || getNextLineIndent(line_no) < line.indent) {
- html += "" + line.type + ">\n";
- }
- break;
-
- case "p":
- if (!previousLineIs("p", line_no)) {
- html += "\n";
- }
- html += markdownInlines(line.text) + "\n";
- if (!nextLineIs("p", line_no)) {
- html += "
\n";
- }
- break;
-
- case "code":
- if (!previousLineIs("code", line_no)) {
- html += "";
- }
- html += escapeHtml(line.text) + "\n";
- if (!nextLineIs("code", line_no)) {
- html += "
\n";
- }
- break;
- }
- }
-
- return html;
- }
-
- function activateSelectedResult() {
- if (domSectSearchResults.classList.contains("hidden")) {
- return;
- }
-
- let liDom = domListSearchResults.children[curSearchIndex];
- if (liDom == null && domListSearchResults.children.length !== 0) {
- liDom = domListSearchResults.children[0];
- }
- if (liDom != null) {
- let aDom = liDom.children[0];
- location.href = (aDom.getAttribute("href"));
- curSearchIndex = -1;
- }
- domSearch.blur();
- }
-
-
- function onSearchKeyDown(ev) {
- switch (getKeyString(ev)) {
- case "Enter":
- // detect if this search changes anything
- let terms1 = getSearchTerms();
- startSearch();
- updateCurNav();
- let terms2 = getSearchTerms();
- // we might have to wait for onHashChange to trigger
- imFeelingLucky = (terms1.join(' ') !== terms2.join(' '));
- if (!imFeelingLucky) activateSelectedResult();
-
- ev.preventDefault();
- ev.stopPropagation();
- return;
- case "Esc":
- domSearch.value = "";
- domSearch.blur();
- curSearchIndex = -1;
- ev.preventDefault();
- ev.stopPropagation();
- startSearch();
- return;
- case "Up":
- moveSearchCursor(-1);
- ev.preventDefault();
- ev.stopPropagation();
- return;
- case "Down":
- moveSearchCursor(1);
- ev.preventDefault();
- ev.stopPropagation();
- return;
- default:
- if (ev.shiftKey || ev.ctrlKey || ev.altKey) return;
-
- curSearchIndex = -1;
- ev.stopPropagation();
- startAsyncSearch();
- return;
- }
- }
-
-
-
- function moveSearchCursor(dir) {
- if (curSearchIndex < 0 || curSearchIndex >= domListSearchResults.children.length) {
- if (dir > 0) {
- curSearchIndex = -1 + dir;
- } else if (dir < 0) {
- curSearchIndex = domListSearchResults.children.length + dir;
- }
+ if (parsing_code) {
+ currentRun += innerText[i];
+ in_code = true;
} else {
- curSearchIndex += dir;
+ let any = false;
+ for (
+ let idx = stack.length > 0 ? -1 : 0;
+ idx < formats.length;
+ idx++
+ ) {
+ const fmt = idx >= 0 ? formats[idx] : stack[stack.length - 1];
+ if (innerText.substr(i, fmt.marker.length) == fmt.marker) {
+ flushRun();
+ if (stack[stack.length - 1] == fmt) {
+ stack.pop();
+ innerHTML += "" + fmt.tag + ">";
+ } else {
+ stack.push(fmt);
+ innerHTML += "<" + fmt.tag + ">";
+ }
+ i += fmt.marker.length - 1;
+ any = true;
+ break;
+ }
+ }
+ if (!any) {
+ currentRun += innerText[i];
+ }
}
- if (curSearchIndex < 0) {
- curSearchIndex = 0;
- }
- if (curSearchIndex >= domListSearchResults.children.length) {
- curSearchIndex = domListSearchResults.children.length - 1;
- }
- renderSearchCursor();
+ }
+ flushRun();
+
+ while (stack.length > 0) {
+ const fmt = stack.pop();
+ innerHTML += "" + fmt.tag + ">";
+ }
+
+ return innerHTML;
}
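
Assuming the tag strings restored above, markdownInlines() behaves like this on a made-up docstring:

    markdownInlines("make it **bold** with `code`");
    // -> 'make it <strong>bold</strong> with <code>code</code>'
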
-
- function getKeyString(ev) {
- let name;
- let ignoreShift = false;
- switch (ev.which) {
- case 13:
- name = "Enter";
- break;
- case 27:
- name = "Esc";
- break;
- case 38:
- name = "Up";
- break;
- case 40:
- name = "Down";
- break;
- default:
- ignoreShift = true;
- name = (ev.key != null) ? ev.key : String.fromCharCode(ev.charCode || ev.keyCode);
- }
- if (!ignoreShift && ev.shiftKey) name = "Shift+" + name;
- if (ev.altKey) name = "Alt+" + name;
- if (ev.ctrlKey) name = "Ctrl+" + name;
- return name;
+ function previousLineIs(type, line_no) {
+ if (line_no > 0) {
+ return lines[line_no - 1].type == type;
+ } else {
+ return false;
+ }
}
-
- function onWindowKeyDown(ev) {
- switch (getKeyString(ev)) {
- case "Esc":
- if (!domHelpModal.classList.contains("hidden")) {
- domHelpModal.classList.add("hidden");
- ev.preventDefault();
- ev.stopPropagation();
- }
- break;
- case "s":
- domSearch.focus();
- domSearch.select();
- ev.preventDefault();
- ev.stopPropagation();
- startAsyncSearch();
- break;
- case "?":
- ev.preventDefault();
- ev.stopPropagation();
- showHelpModal();
- break;
- }
+ function nextLineIs(type, line_no) {
+ if (line_no < lines.length - 1) {
+ return lines[line_no + 1].type == type;
+ } else {
+ return false;
+ }
}
-function showHelpModal() {
+ function getPreviousLineIndent(line_no) {
+ if (line_no > 0) {
+ return lines[line_no - 1].indent;
+ } else {
+ return 0;
+ }
+ }
+
+ function getNextLineIndent(line_no) {
+ if (line_no < lines.length - 1) {
+ return lines[line_no + 1].indent;
+ } else {
+ return 0;
+ }
+ }
+
+ let html = "";
+ for (let line_no = 0; line_no < lines.length; line_no++) {
+ const line = lines[line_no];
+
+ switch (line.type) {
+ case "h1":
+ case "h2":
+ case "h3":
+ case "h4":
+ case "h5":
+ case "h6":
+ html +=
+ "<" +
+ line.type +
+ ">" +
+ markdownInlines(line.text) +
+ "" +
+ line.type +
+ ">\n";
+ break;
+
+ case "ul":
+ case "ol":
+ if (
+ !previousLineIs("ul", line_no) ||
+ getPreviousLineIndent(line_no) < line.indent
+ ) {
+ html += "<" + line.type + ">\n";
+ }
+
+ html += "" + markdownInlines(line.text) + " \n";
+
+ if (
+ !nextLineIs("ul", line_no) ||
+ getNextLineIndent(line_no) < line.indent
+ ) {
+ html += "" + line.type + ">\n";
+ }
+ break;
+
+ case "p":
+ if (!previousLineIs("p", line_no)) {
+ html += "\n";
+ }
+ html += markdownInlines(line.text) + "\n";
+ if (!nextLineIs("p", line_no)) {
+ html += "
\n";
+ }
+ break;
+
+ case "code":
+ if (!previousLineIs("code", line_no)) {
+ html += "";
+ }
+ html += escapeHtml(line.text) + "\n";
+ if (!nextLineIs("code", line_no)) {
+ html += "
\n";
+ }
+ break;
+ }
+ }
+
+ return html;
+ }
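
Putting the two phases together, a hedged end-to-end example (the leading spaces survive the substr() calls from phase 1):

    markdown("# Title\n- a\n- b\n\ntext");
    // -> "<h1> Title</h1>\n<ul>\n<li> a</li>\n<li> b</li>\n</ul>\n<p>\ntext\n</p>\n"
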
+
+ function activateSelectedResult() {
+ if (domSectSearchResults.classList.contains("hidden")) {
+ return;
+ }
+
+ let liDom = domListSearchResults.children[curSearchIndex];
+ if (liDom == null && domListSearchResults.children.length !== 0) {
+ liDom = domListSearchResults.children[0];
+ }
+ if (liDom != null) {
+ let aDom = liDom.children[0];
+ location.href = aDom.getAttribute("href");
+ curSearchIndex = -1;
+ }
+ domSearch.blur();
+ }
+
+ // hide the modal if it's visible or return to the previous result page and unfocus the search
+ function onEscape(ev) {
+ if (!domHelpModal.classList.contains("hidden")) {
+ domHelpModal.classList.add("hidden");
+ ev.preventDefault();
+ ev.stopPropagation();
+ } else {
+ domSearch.value = "";
+ domSearch.blur();
+ domSearchPlaceholder.classList.remove("hidden");
+ curSearchIndex = -1;
+ ev.preventDefault();
+ ev.stopPropagation();
+ startSearch();
+ }
+ }
+
+ function onSearchKeyDown(ev) {
+ switch (getKeyString(ev)) {
+ case "Enter":
+ // detect if this search changes anything
+ let terms1 = getSearchTerms();
+ startSearch();
+ updateCurNav();
+ let terms2 = getSearchTerms();
+ // we might have to wait for onHashChange to trigger
+ imFeelingLucky = terms1.join(" ") !== terms2.join(" ");
+ if (!imFeelingLucky) activateSelectedResult();
+
+ ev.preventDefault();
+ ev.stopPropagation();
+ return;
+ case "Esc":
+ onEscape(ev);
+ return;
+ case "Up":
+ moveSearchCursor(-1);
+ ev.preventDefault();
+ ev.stopPropagation();
+ return;
+ case "Down":
+ // TODO: make the page scroll down if the search cursor is out of the screen
+ moveSearchCursor(1);
+ ev.preventDefault();
+ ev.stopPropagation();
+ return;
+ default:
+ if (ev.shiftKey || ev.ctrlKey || ev.altKey) return;
+
+ curSearchIndex = -1;
+ ev.stopPropagation();
+ startAsyncSearch();
+ return;
+ }
+ }
+
+ function moveSearchCursor(dir) {
+ if (
+ curSearchIndex < 0 ||
+ curSearchIndex >= domListSearchResults.children.length
+ ) {
+ if (dir > 0) {
+ curSearchIndex = -1 + dir;
+ } else if (dir < 0) {
+ curSearchIndex = domListSearchResults.children.length + dir;
+ }
+ } else {
+ curSearchIndex += dir;
+ }
+ if (curSearchIndex < 0) {
+ curSearchIndex = 0;
+ }
+ if (curSearchIndex >= domListSearchResults.children.length) {
+ curSearchIndex = domListSearchResults.children.length - 1;
+ }
+ renderSearchCursor();
+ }
+
+ function getKeyString(ev) {
+ let name;
+ let ignoreShift = false;
+ switch (ev.which) {
+ case 13:
+ name = "Enter";
+ break;
+ case 27:
+ name = "Esc";
+ break;
+ case 38:
+ name = "Up";
+ break;
+ case 40:
+ name = "Down";
+ break;
+ default:
+ ignoreShift = true;
+ name =
+ ev.key != null
+ ? ev.key
+ : String.fromCharCode(ev.charCode || ev.keyCode);
+ }
+ if (!ignoreShift && ev.shiftKey) name = "Shift+" + name;
+ if (ev.altKey) name = "Alt+" + name;
+ if (ev.ctrlKey) name = "Ctrl+" + name;
+ return name;
+ }
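
getKeyString() reduces key events to strings such as the following (synthetic events, for illustration only):

    // { which: 27 }                -> "Esc"
    // { which: 38, ctrlKey: true } -> "Ctrl+Up"
    // { key: "s" }                 -> "s" (shift is ignored for printable keys)
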
+
+ function onWindowKeyDown(ev) {
+ switch (getKeyString(ev)) {
+ case "Esc":
+ onEscape(ev);
+ break;
+ case "s":
+ if (domHelpModal.classList.contains("hidden")) {
+ if (ev.target == domSearch) break;
+
+ domSearch.focus();
+ domSearch.select();
+ domDocs.scrollTo(0, 0);
+ ev.preventDefault();
+ ev.stopPropagation();
+ startAsyncSearch();
+ }
+ break;
+ case "?":
+ ev.preventDefault();
+ ev.stopPropagation();
+ showHelpModal();
+ break;
+ }
+ }
+
+ function showHelpModal() {
domHelpModal.classList.remove("hidden");
- domHelpModal.style.left = (window.innerWidth / 2 - domHelpModal.clientWidth / 2) + "px";
- domHelpModal.style.top = (window.innerHeight / 2 - domHelpModal.clientHeight / 2) + "px";
+ domHelpModal.style.left =
+ window.innerWidth / 2 - domHelpModal.clientWidth / 2 + "px";
+ domHelpModal.style.top =
+ window.innerHeight / 2 - domHelpModal.clientHeight / 2 + "px";
domHelpModal.focus();
-}
+ domSearch.blur();
+ }
-function clearAsyncSearch() {
+ function clearAsyncSearch() {
if (searchTimer != null) {
- clearTimeout(searchTimer);
- searchTimer = null;
+ clearTimeout(searchTimer);
+ searchTimer = null;
}
-}
+ }
-function startAsyncSearch() {
+ function startAsyncSearch() {
clearAsyncSearch();
searchTimer = setTimeout(startSearch, 100);
-}
-function startSearch() {
+ }
+ function startSearch() {
clearAsyncSearch();
let oldHash = location.hash;
let parts = oldHash.split("?");
- let newPart2 = (domSearch.value === "") ? "" : ("?" + domSearch.value);
- location.hash = (parts.length === 1) ? (oldHash + newPart2) : (parts[0] + newPart2);
-}
-function getSearchTerms() {
+ let newPart2 = domSearch.value === "" ? "" : "?" + domSearch.value;
+ location.replace(parts.length === 1 ? oldHash + newPart2 : parts[0] + newPart2);
+ }
+ function getSearchTerms() {
let list = curNavSearch.trim().split(/[ \r\n\t]+/);
list.sort();
return list;
-}
-function renderSearch() {
+ }
+
+ function renderSearch() {
let matchedItems = [];
- let ignoreCase = (curNavSearch.toLowerCase() === curNavSearch);
+ let ignoreCase = curNavSearch.toLowerCase() === curNavSearch;
let terms = getSearchTerms();
- decl_loop: for (let declIndex = 0; declIndex < zigAnalysis.decls.length; declIndex += 1) {
- let canonPath = getCanonDeclPath(declIndex);
- if (canonPath == null) continue;
+ decl_loop: for (
+ let declIndex = 0;
+ declIndex < zigAnalysis.decls.length;
+ declIndex += 1
+ ) {
+ let canonPath = getCanonDeclPath(declIndex);
+ if (canonPath == null) continue;
- let decl = zigAnalysis.decls[declIndex];
- let lastPkgName = canonPath.pkgNames[canonPath.pkgNames.length - 1];
- let fullPathSearchText = lastPkgName + "." + canonPath.declNames.join('.');
- let astNode = zigAnalysis.astNodes[decl.src];
- let fileAndDocs = "" //zigAnalysis.files[astNode.file];
- // TODO: understand what this piece of code is trying to achieve
- // also right now `files` are expressed as a hashmap.
- if (astNode.docs != null) {
- fileAndDocs += "\n" + astNode.docs;
+ let decl = zigAnalysis.decls[declIndex];
+ let lastPkgName = canonPath.pkgNames[canonPath.pkgNames.length - 1];
+ let fullPathSearchText =
+ lastPkgName + "." + canonPath.declNames.join(".");
+ let astNode = zigAnalysis.astNodes[decl.src];
+ let fileAndDocs = ""; //zigAnalysis.files[astNode.file];
+ // TODO: understand what this piece of code is trying to achieve
+ // also right now `files` are expressed as a hashmap.
+ if (astNode.docs != null) {
+ fileAndDocs += "\n" + astNode.docs;
+ }
+ let fullPathSearchTextLower = fullPathSearchText;
+ if (ignoreCase) {
+ fullPathSearchTextLower = fullPathSearchTextLower.toLowerCase();
+ fileAndDocs = fileAndDocs.toLowerCase();
+ }
+
+ let points = 0;
+ for (let termIndex = 0; termIndex < terms.length; termIndex += 1) {
+ let term = terms[termIndex];
+
+ // exact, case sensitive match of full decl path
+ if (fullPathSearchText === term) {
+ points += 4;
+ continue;
}
- let fullPathSearchTextLower = fullPathSearchText;
- if (ignoreCase) {
- fullPathSearchTextLower = fullPathSearchTextLower.toLowerCase();
- fileAndDocs = fileAndDocs.toLowerCase();
+ // exact, case sensitive match of just decl name
+ if (decl.name == term) {
+ points += 3;
+ continue;
+ }
+ // substring, case insensitive match of full decl path
+ if (fullPathSearchTextLower.indexOf(term) >= 0) {
+ points += 2;
+ continue;
+ }
+ if (fileAndDocs.indexOf(term) >= 0) {
+ points += 1;
+ continue;
}
- let points = 0;
- for (let termIndex = 0; termIndex < terms.length; termIndex += 1) {
- let term = terms[termIndex];
+ continue decl_loop;
+ }
- // exact, case sensitive match of full decl path
- if (fullPathSearchText === term) {
- points += 4;
- continue;
- }
- // exact, case sensitive match of just decl name
- if (decl.name == term) {
- points += 3;
- continue;
- }
- // substring, case insensitive match of full decl path
- if (fullPathSearchTextLower.indexOf(term) >= 0) {
- points += 2;
- continue;
- }
- if (fileAndDocs.indexOf(term) >= 0) {
- points += 1;
- continue;
- }
-
- continue decl_loop;
- }
-
- matchedItems.push({
- decl: decl,
- path: canonPath,
- points: points,
- });
+ matchedItems.push({
+ decl: decl,
+ path: canonPath,
+ points: points,
+ });
}
if (matchedItems.length !== 0) {
- resizeDomList(domListSearchResults, matchedItems.length, '<li><a href="#"></a></li>');
+ matchedItems.sort(function (a, b) {
+ let cmp = operatorCompare(b.points, a.points);
+ if (cmp != 0) return cmp;
+ return operatorCompare(a.decl.name, b.decl.name);
+ });
- matchedItems.sort(function(a, b) {
- let cmp = operatorCompare(b.points, a.points);
- if (cmp != 0) return cmp;
- return operatorCompare(a.decl.name, b.decl.name);
- });
+ let searchTrimmed = false;
+ const searchTrimResultsMaxItems = 200;
+ if (searchTrimResults && matchedItems.length > searchTrimResultsMaxItems) {
+ matchedItems = matchedItems.slice(0, searchTrimResultsMaxItems);
+ searchTrimmed = true;
+ }
- for (let i = 0; i < matchedItems.length; i += 1) {
- let liDom = domListSearchResults.children[i];
- let aDom = liDom.children[0];
- let match = matchedItems[i];
- let lastPkgName = match.path.pkgNames[match.path.pkgNames.length - 1];
- aDom.textContent = lastPkgName + "." + match.path.declNames.join('.');
- aDom.setAttribute('href', navLink(match.path.pkgNames, match.path.declNames));
- }
- renderSearchCursor();
+ // Build up the list of search results
+ let matchedItemsHTML = "";
- domSectSearchResults.classList.remove("hidden");
+ for (let i = 0; i < matchedItems.length; i += 1) {
+ const match = matchedItems[i];
+ const lastPkgName = match.path.pkgNames[match.path.pkgNames.length - 1];
+
+ const text = lastPkgName + "." + match.path.declNames.join(".");
+ const href = navLink(match.path.pkgNames, match.path.declNames);
+
+ matchedItemsHTML += "" + text + " ";
+ }
+
+ // Replace the search results using our newly constructed HTML string
+ domListSearchResults.innerHTML = matchedItemsHTML;
+ if (searchTrimmed) {
+ domSectSearchAllResultsLink.classList.remove("hidden");
+ }
+ renderSearchCursor();
+
+ domSectSearchResults.classList.remove("hidden");
} else {
- domSectSearchNoResults.classList.remove("hidden");
+ domSectSearchNoResults.classList.remove("hidden");
}
-}
+ }
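
The scoring in renderSearch() is additive per search term; sketched against a made-up index, searching for "ArrayList":

    // fullPathSearchText === "std.ArrayList" -> +4 (exact, case-sensitive full path)
    // decl.name === "ArrayList"              -> +3 (exact decl name)
    // path contains the term (case-folded)   -> +2
    // term found only in docs/file text      -> +1
    // nothing matches -> `continue decl_loop` drops the decl entirely
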
-function renderSearchCursor() {
+ function renderSearchCursor() {
for (let i = 0; i < domListSearchResults.children.length; i += 1) {
- let liDom = (domListSearchResults.children[i]);
- if (curSearchIndex === i) {
- liDom.classList.add("selected");
- } else {
- liDom.classList.remove("selected");
- }
+ let liDom = domListSearchResults.children[i];
+ if (curSearchIndex === i) {
+ liDom.classList.add("selected");
+ } else {
+ liDom.classList.remove("selected");
+ }
}
-}
+ }
+ // function indexNodesToCalls() {
+ // let map = {};
+ // for (let i = 0; i < zigAnalysis.calls.length; i += 1) {
+ // let call = zigAnalysis.calls[i];
+ // let fn = zigAnalysis.fns[call.fn];
+ // if (map[fn.src] == null) {
+ // map[fn.src] = [i];
+ // } else {
+ // map[fn.src].push(i);
+ // }
+ // }
+ // return map;
+ // }
-
-// function indexNodesToCalls() {
-// let map = {};
-// for (let i = 0; i < zigAnalysis.calls.length; i += 1) {
-// let call = zigAnalysis.calls[i];
-// let fn = zigAnalysis.fns[call.fn];
-// if (map[fn.src] == null) {
-// map[fn.src] = [i];
-// } else {
-// map[fn.src].push(i);
-// }
-// }
-// return map;
-// }
-
-
-
-function byNameProperty(a, b) {
+ function byNameProperty(a, b) {
return operatorCompare(a.name, b.name);
-}
-
-
-
+ }
})();
diff --git a/lib/libc/glibc/abilists b/lib/libc/glibc/abilists
index 205d3c15fb..179e3f6f3f 100644
Binary files a/lib/libc/glibc/abilists and b/lib/libc/glibc/abilists differ
diff --git a/lib/std/Thread/Futex.zig b/lib/std/Thread/Futex.zig
index 0d9ccc8969..58f49c483e 100644
--- a/lib/std/Thread/Futex.zig
+++ b/lib/std/Thread/Futex.zig
@@ -703,7 +703,7 @@ const PosixImpl = struct {
const max_multiplier_bits = @bitSizeOf(usize);
const fibonacci_multiplier = 0x9E3779B97F4A7C15 >> (64 - max_multiplier_bits);
- const max_bucket_bits = @ctz(usize, buckets.len);
+ const max_bucket_bits = @ctz(buckets.len);
comptime assert(std.math.isPowerOfTwo(buckets.len));
const index = (address *% fibonacci_multiplier) >> (max_multiplier_bits - max_bucket_bits);
@@ -721,7 +721,7 @@ const PosixImpl = struct {
// then cut off the zero bits from the alignment to get the unique address.
const addr = @ptrToInt(ptr);
assert(addr & (alignment - 1) == 0);
- return addr >> @ctz(usize, alignment);
+ return addr >> @ctz(alignment);
}
};
diff --git a/lib/std/Thread/Mutex.zig b/lib/std/Thread/Mutex.zig
index 35d754ad19..973a312665 100644
--- a/lib/std/Thread/Mutex.zig
+++ b/lib/std/Thread/Mutex.zig
@@ -140,7 +140,7 @@ const FutexImpl = struct {
// - they both seem to mark the cache-line as modified regardless: https://stackoverflow.com/a/63350048
// - `lock bts` is smaller instruction-wise which makes it better for inlining
if (comptime builtin.target.cpu.arch.isX86()) {
- const locked_bit = @ctz(u32, @as(u32, locked));
+ const locked_bit = @ctz(@as(u32, locked));
return self.state.bitSet(locked_bit, .Acquire) == 0;
}
diff --git a/lib/std/Thread/RwLock.zig b/lib/std/Thread/RwLock.zig
index 201e1cc860..46d46cdfa4 100644
--- a/lib/std/Thread/RwLock.zig
+++ b/lib/std/Thread/RwLock.zig
@@ -168,8 +168,8 @@ pub const DefaultRwLock = struct {
const IS_WRITING: usize = 1;
const WRITER: usize = 1 << 1;
const READER: usize = 1 << (1 + @bitSizeOf(Count));
- const WRITER_MASK: usize = std.math.maxInt(Count) << @ctz(usize, WRITER);
- const READER_MASK: usize = std.math.maxInt(Count) << @ctz(usize, READER);
+ const WRITER_MASK: usize = std.math.maxInt(Count) << @ctz(WRITER);
+ const READER_MASK: usize = std.math.maxInt(Count) << @ctz(READER);
const Count = std.meta.Int(.unsigned, @divFloor(@bitSizeOf(usize) - 1, 2));
pub fn tryLock(rwl: *DefaultRwLock) bool {
diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig
index 655bdeaa42..a8c53c3142 100644
--- a/lib/std/array_list.zig
+++ b/lib/std/array_list.zig
@@ -221,6 +221,30 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
mem.copy(T, self.items[old_len..], items);
}
+ /// Append an unaligned slice of items to the list. Allocates more
+ /// memory as necessary. Only call this function if calling
+ /// `appendSlice` instead would be a compile error.
+ pub fn appendUnalignedSlice(self: *Self, items: []align(1) const T) Allocator.Error!void {
+ try self.ensureUnusedCapacity(items.len);
+ self.appendUnalignedSliceAssumeCapacity(items);
+ }
+
+ /// Append the slice of items to the list, asserting the capacity is already
+ /// enough to store the new items. **Does not** invalidate pointers.
+ /// Only call this function if calling `appendSliceAssumeCapacity` instead
+ /// would be a compile error.
+ pub fn appendUnalignedSliceAssumeCapacity(self: *Self, items: []align(1) const T) void {
+ const old_len = self.items.len;
+ const new_len = old_len + items.len;
+ assert(new_len <= self.capacity);
+ self.items.len = new_len;
+ @memcpy(
+ @ptrCast([*]align(@alignOf(T)) u8, self.items.ptr + old_len),
+ @ptrCast([*]const u8, items.ptr),
+ items.len * @sizeOf(T),
+ );
+ }
+
pub const Writer = if (T != u8)
@compileError("The Writer interface is only defined for ArrayList(u8) " ++
"but the given type is ArrayList(" ++ @typeName(T) ++ ")")
@@ -592,6 +616,29 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
mem.copy(T, self.items[old_len..], items);
}
+ /// Append the slice of items to the list. Allocates more
+ /// memory as necessary. Only call this function if a call to `appendSlice` instead would
+ /// be a compile error.
+ pub fn appendUnalignedSlice(self: *Self, allocator: Allocator, items: []align(1) const T) Allocator.Error!void {
+ try self.ensureUnusedCapacity(allocator, items.len);
+ self.appendUnalignedSliceAssumeCapacity(items);
+ }
+
+ /// Append an unaligned slice of items to the list, asserting the capacity is enough
+ /// to store the new items. Only call this function if a call to `appendSliceAssumeCapacity`
+ /// instead would be a compile error.
+ pub fn appendUnalignedSliceAssumeCapacity(self: *Self, items: []align(1) const T) void {
+ const old_len = self.items.len;
+ const new_len = old_len + items.len;
+ assert(new_len <= self.capacity);
+ self.items.len = new_len;
+ @memcpy(
+ @ptrCast([*]align(@alignOf(T)) u8, self.items.ptr + old_len),
+ @ptrCast([*]const u8, items.ptr),
+ items.len * @sizeOf(T),
+ );
+ }
+
pub const WriterContext = struct {
self: *Self,
allocator: Allocator,
@@ -899,6 +946,14 @@ test "std.ArrayList/ArrayListUnmanaged.basic" {
try testing.expect(list.pop() == 1);
try testing.expect(list.items.len == 9);
+ var unaligned: [3]i32 align(1) = [_]i32{ 4, 5, 6 };
+ list.appendUnalignedSlice(&unaligned) catch unreachable;
+ try testing.expect(list.items.len == 12);
+ try testing.expect(list.pop() == 6);
+ try testing.expect(list.pop() == 5);
+ try testing.expect(list.pop() == 4);
+ try testing.expect(list.items.len == 9);
+
list.appendSlice(&[_]i32{}) catch unreachable;
try testing.expect(list.items.len == 9);
@@ -941,6 +996,14 @@ test "std.ArrayList/ArrayListUnmanaged.basic" {
try testing.expect(list.pop() == 1);
try testing.expect(list.items.len == 9);
+ var unaligned: [3]i32 align(1) = [_]i32{ 4, 5, 6 };
+ list.appendUnalignedSlice(a, &unaligned) catch unreachable;
+ try testing.expect(list.items.len == 12);
+ try testing.expect(list.pop() == 6);
+ try testing.expect(list.pop() == 5);
+ try testing.expect(list.pop() == 4);
+ try testing.expect(list.items.len == 9);
+
list.appendSlice(a, &[_]i32{}) catch unreachable;
try testing.expect(list.items.len == 9);
diff --git a/lib/std/bit_set.zig b/lib/std/bit_set.zig
index 8006a623b5..2da3f1ac21 100644
--- a/lib/std/bit_set.zig
+++ b/lib/std/bit_set.zig
@@ -91,7 +91,7 @@ pub fn IntegerBitSet(comptime size: u16) type {
/// Returns the total number of set bits in this bit set.
pub fn count(self: Self) usize {
- return @popCount(MaskInt, self.mask);
+ return @popCount(self.mask);
}
/// Changes the value of the specified bit of the bit
@@ -179,7 +179,7 @@ pub fn IntegerBitSet(comptime size: u16) type {
pub fn findFirstSet(self: Self) ?usize {
const mask = self.mask;
if (mask == 0) return null;
- return @ctz(MaskInt, mask);
+ return @ctz(mask);
}
/// Finds the index of the first set bit, and unsets it.
@@ -187,7 +187,7 @@ pub fn IntegerBitSet(comptime size: u16) type {
pub fn toggleFirstSet(self: *Self) ?usize {
const mask = self.mask;
if (mask == 0) return null;
- const index = @ctz(MaskInt, mask);
+ const index = @ctz(mask);
self.mask = mask & (mask - 1);
return index;
}
@@ -222,12 +222,12 @@ pub fn IntegerBitSet(comptime size: u16) type {
switch (direction) {
.forward => {
- const next_index = @ctz(MaskInt, self.bits_remain);
+ const next_index = @ctz(self.bits_remain);
self.bits_remain &= self.bits_remain - 1;
return next_index;
},
.reverse => {
- const leading_zeroes = @clz(MaskInt, self.bits_remain);
+ const leading_zeroes = @clz(self.bits_remain);
const top_bit = (@bitSizeOf(MaskInt) - 1) - leading_zeroes;
self.bits_remain &= (@as(MaskInt, 1) << @intCast(ShiftInt, top_bit)) - 1;
return top_bit;
@@ -347,7 +347,7 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type {
pub fn count(self: Self) usize {
var total: usize = 0;
for (self.masks) |mask| {
- total += @popCount(MaskInt, mask);
+ total += @popCount(mask);
}
return total;
}
@@ -475,7 +475,7 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type {
if (mask != 0) break mask;
offset += @bitSizeOf(MaskInt);
} else return null;
- return offset + @ctz(MaskInt, mask);
+ return offset + @ctz(mask);
}
/// Finds the index of the first set bit, and unsets it.
@@ -486,7 +486,7 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type {
if (mask.* != 0) break mask;
offset += @bitSizeOf(MaskInt);
} else return null;
- const index = @ctz(MaskInt, mask.*);
+ const index = @ctz(mask.*);
mask.* &= (mask.* - 1);
return offset + index;
}
@@ -657,7 +657,7 @@ pub const DynamicBitSetUnmanaged = struct {
var total: usize = 0;
for (self.masks[0..num_masks]) |mask| {
// Note: This is where we depend on padding bits being zero
- total += @popCount(MaskInt, mask);
+ total += @popCount(mask);
}
return total;
}
@@ -795,7 +795,7 @@ pub const DynamicBitSetUnmanaged = struct {
mask += 1;
offset += @bitSizeOf(MaskInt);
} else return null;
- return offset + @ctz(MaskInt, mask[0]);
+ return offset + @ctz(mask[0]);
}
/// Finds the index of the first set bit, and unsets it.
@@ -808,7 +808,7 @@ pub const DynamicBitSetUnmanaged = struct {
mask += 1;
offset += @bitSizeOf(MaskInt);
} else return null;
- const index = @ctz(MaskInt, mask[0]);
+ const index = @ctz(mask[0]);
mask[0] &= (mask[0] - 1);
return offset + index;
}
@@ -1067,12 +1067,12 @@ fn BitSetIterator(comptime MaskInt: type, comptime options: IteratorOptions) typ
switch (direction) {
.forward => {
- const next_index = @ctz(MaskInt, self.bits_remain) + self.bit_offset;
+ const next_index = @ctz(self.bits_remain) + self.bit_offset;
self.bits_remain &= self.bits_remain - 1;
return next_index;
},
.reverse => {
- const leading_zeroes = @clz(MaskInt, self.bits_remain);
+ const leading_zeroes = @clz(self.bits_remain);
const top_bit = (@bitSizeOf(MaskInt) - 1) - leading_zeroes;
const no_top_bit_mask = (@as(MaskInt, 1) << @intCast(ShiftInt, top_bit)) - 1;
self.bits_remain &= no_top_bit_mask;
diff --git a/lib/std/bounded_array.zig b/lib/std/bounded_array.zig
index 0b0efc55e4..3d74e5e47f 100644
--- a/lib/std/bounded_array.zig
+++ b/lib/std/bounded_array.zig
@@ -15,16 +15,16 @@ const testing = std.testing;
/// var slice = a.slice(); // a slice of the 64-byte array
/// var a_clone = a; // creates a copy - the structure doesn't use any internal pointers
/// ```
-pub fn BoundedArray(comptime T: type, comptime capacity: usize) type {
+pub fn BoundedArray(comptime T: type, comptime buffer_capacity: usize) type {
return struct {
const Self = @This();
- buffer: [capacity]T = undefined,
+ buffer: [buffer_capacity]T = undefined,
len: usize = 0,
/// Set the actual length of the slice.
/// Returns error.Overflow if it exceeds the length of the backing array.
pub fn init(len: usize) error{Overflow}!Self {
- if (len > capacity) return error.Overflow;
+ if (len > buffer_capacity) return error.Overflow;
return Self{ .len = len };
}
@@ -41,7 +41,7 @@ pub fn BoundedArray(comptime T: type, comptime capacity: usize) type {
/// Adjust the slice's length to `len`.
/// Does not initialize added items if any.
pub fn resize(self: *Self, len: usize) error{Overflow}!void {
- if (len > capacity) return error.Overflow;
+ if (len > buffer_capacity) return error.Overflow;
self.len = len;
}
@@ -69,7 +69,7 @@ pub fn BoundedArray(comptime T: type, comptime capacity: usize) type {
/// Check that the slice can hold at least `additional_count` items.
pub fn ensureUnusedCapacity(self: Self, additional_count: usize) error{Overflow}!void {
- if (self.len + additional_count > capacity) {
+ if (self.len + additional_count > buffer_capacity) {
return error.Overflow;
}
}
@@ -83,7 +83,7 @@ pub fn BoundedArray(comptime T: type, comptime capacity: usize) type {
/// Increase length by 1, returning pointer to the new item.
/// Asserts that there is space for the new item.
pub fn addOneAssumeCapacity(self: *Self) *T {
- assert(self.len < capacity);
+ assert(self.len < buffer_capacity);
self.len += 1;
return &self.slice()[self.len - 1];
}
@@ -236,7 +236,7 @@ pub fn BoundedArray(comptime T: type, comptime capacity: usize) type {
pub fn appendNTimesAssumeCapacity(self: *Self, value: T, n: usize) void {
const old_len = self.len;
self.len += n;
- assert(self.len <= capacity);
+ assert(self.len <= buffer_capacity);
mem.set(T, self.slice()[old_len..self.len], value);
}
@@ -275,7 +275,7 @@ test "BoundedArray" {
try testing.expectEqualSlices(u8, &x, a.constSlice());
var a2 = a;
- try testing.expectEqualSlices(u8, a.constSlice(), a.constSlice());
+ try testing.expectEqualSlices(u8, a.constSlice(), a2.constSlice());
a2.set(0, 0);
try testing.expect(a.get(0) != a2.get(0));
diff --git a/lib/std/build.zig b/lib/std/build.zig
index b5defc2e9c..4c05586159 100644
--- a/lib/std/build.zig
+++ b/lib/std/build.zig
@@ -1495,6 +1495,7 @@ pub const LibExeObjStep = struct {
emit_h: bool = false,
bundle_compiler_rt: ?bool = null,
single_threaded: ?bool = null,
+ stack_protector: ?bool = null,
disable_stack_probing: bool,
disable_sanitize_c: bool,
sanitize_thread: bool,
@@ -1896,13 +1897,12 @@ pub const LibExeObjStep = struct {
/// When a binary cannot be run through emulation or the option is disabled, a warning
/// will be printed and the binary will *NOT* be run.
pub fn runEmulatable(exe: *LibExeObjStep) *EmulatableRunStep {
- assert(exe.kind == .exe or exe.kind == .text_exe);
+ assert(exe.kind == .exe or exe.kind == .test_exe);
- const run_step = EmulatableRunStep.create(exe.builder.fmt("run {s}", .{exe.step.name}), exe);
+ const run_step = EmulatableRunStep.create(exe.builder, exe.builder.fmt("run {s}", .{exe.step.name}), exe);
if (exe.vcpkg_bin_path) |path| {
- run_step.addPathDir(path);
+ RunStep.addPathDirInternal(&run_step.step, exe.builder, path);
}
-
return run_step;
}
@@ -2826,6 +2826,13 @@ pub const LibExeObjStep = struct {
if (self.disable_stack_probing) {
try zig_args.append("-fno-stack-check");
}
+ if (self.stack_protector) |stack_protector| {
+ if (stack_protector) {
+ try zig_args.append("-fstack-protector");
+ } else {
+ try zig_args.append("-fno-stack-protector");
+ }
+ }
if (self.red_zone) |red_zone| {
if (red_zone) {
try zig_args.append("-mred-zone");
diff --git a/lib/std/build/OptionsStep.zig b/lib/std/build/OptionsStep.zig
index 7b219a210e..89a0b3ab3e 100644
--- a/lib/std/build/OptionsStep.zig
+++ b/lib/std/build/OptionsStep.zig
@@ -171,6 +171,7 @@ fn printLiteral(out: anytype, val: anytype, indent: u8) !void {
.Void,
.Bool,
.Int,
+ .ComptimeInt,
.Float,
.Null,
=> try out.print("{any}", .{val}),
@@ -302,6 +303,7 @@ test "OptionsStep" {
options.addOption(usize, "option1", 1);
options.addOption(?usize, "option2", null);
options.addOption(?usize, "option3", 3);
+ options.addOption(comptime_int, "option4", 4);
options.addOption([]const u8, "string", "zigisthebest");
options.addOption(?[]const u8, "optional_string", null);
options.addOption([2][2]u16, "nested_array", nested_array);
@@ -314,6 +316,7 @@ test "OptionsStep" {
\\pub const option1: usize = 1;
\\pub const option2: ?usize = null;
\\pub const option3: ?usize = 3;
+ \\pub const option4: comptime_int = 4;
\\pub const string: []const u8 = "zigisthebest";
\\pub const optional_string: ?[]const u8 = null;
\\pub const nested_array: [2][2]u16 = [2][2]u16 {
diff --git a/lib/std/build/RunStep.zig b/lib/std/build/RunStep.zig
index 1e22fd10a3..168f5d9d58 100644
--- a/lib/std/build/RunStep.zig
+++ b/lib/std/build/RunStep.zig
@@ -101,7 +101,7 @@ pub fn addPathDir(self: *RunStep, search_path: []const u8) void {
}
/// For internal use only, users of `RunStep` should use `addPathDir` directly.
-fn addPathDirInternal(step: *Step, builder: *Builder, search_path: []const u8) void {
+pub fn addPathDirInternal(step: *Step, builder: *Builder, search_path: []const u8) void {
const env_map = getEnvMapInternal(step, builder.allocator);
const key = "PATH";
diff --git a/lib/std/build/TranslateCStep.zig b/lib/std/build/TranslateCStep.zig
index 1f9bee463c..7d7f2a62a4 100644
--- a/lib/std/build/TranslateCStep.zig
+++ b/lib/std/build/TranslateCStep.zig
@@ -21,6 +21,7 @@ output_dir: ?[]const u8,
out_basename: []const u8,
target: CrossTarget = CrossTarget{},
output_file: build.GeneratedFile,
+use_stage1: ?bool = null,
pub fn create(builder: *Builder, source: build.FileSource) *TranslateCStep {
const self = builder.allocator.create(TranslateCStep) catch unreachable;
@@ -91,6 +92,19 @@ fn make(step: *Step) !void {
try argv_list.append("-D");
try argv_list.append(c_macro);
}
+ if (self.use_stage1) |stage1| {
+ if (stage1) {
+ try argv_list.append("-fstage1");
+ } else {
+ try argv_list.append("-fno-stage1");
+ }
+ } else if (self.builder.use_stage1) |stage1| {
+ if (stage1) {
+ try argv_list.append("-fstage1");
+ } else {
+ try argv_list.append("-fno-stage1");
+ }
+ }
try argv_list.append(self.source.getPath(self.builder));
diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index 047c65439c..69312df838 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -294,6 +294,8 @@ pub const Type = union(enum) {
/// therefore must be kept in sync with the compiler implementation.
pub const Struct = struct {
layout: ContainerLayout,
+ /// Only valid if layout is .Packed
+ backing_integer: ?type = null,
fields: []const StructField,
decls: []const Declaration,
is_tuple: bool,
@@ -864,13 +866,12 @@ pub fn panicUnwrapError(st: ?*StackTrace, err: anyerror) noreturn {
pub fn panicOutOfBounds(index: usize, len: usize) noreturn {
@setCold(true);
- std.debug.panic("attempt to index out of bound: index {d}, len {d}", .{ index, len });
+ std.debug.panic("index out of bounds: index {d}, len {d}", .{ index, len });
}
-pub noinline fn returnError(maybe_st: ?*StackTrace) void {
+pub noinline fn returnError(st: *StackTrace) void {
@setCold(true);
@setRuntimeSafety(false);
- const st = maybe_st orelse return;
addErrRetTraceAddr(st, @returnAddress());
}
diff --git a/lib/std/c.zig b/lib/std/c.zig
index c33d7b35ab..7b018f62d5 100644
--- a/lib/std/c.zig
+++ b/lib/std/c.zig
@@ -20,7 +20,7 @@ pub const Tokenizer = tokenizer.Tokenizer;
/// If linking gnu libc (glibc), the `ok` value will be true if the target
/// version is greater than or equal to `glibc_version`.
/// If linking a libc other than these, returns `false`.
-pub fn versionCheck(glibc_version: std.builtin.Version) type {
+pub fn versionCheck(comptime glibc_version: std.builtin.Version) type {
return struct {
pub const ok = blk: {
if (!builtin.link_libc) break :blk false;
@@ -263,7 +263,11 @@ const PThreadForkFn = if (builtin.zig_backend == .stage1)
fn () callconv(.C) void
else
*const fn () callconv(.C) void;
-pub extern "c" fn pthread_key_create(key: *c.pthread_key_t, destructor: ?fn (value: *anyopaque) callconv(.C) void) c.E;
+pub extern "c" fn pthread_key_create(key: *c.pthread_key_t, destructor: ?PThreadKeyCreateFn) c.E;
+const PThreadKeyCreateFn = if (builtin.zig_backend == .stage1)
+ fn (value: *anyopaque) callconv(.C) void
+else
+ *const fn (value: *anyopaque) callconv(.C) void;
pub extern "c" fn pthread_key_delete(key: c.pthread_key_t) c.E;
pub extern "c" fn pthread_getspecific(key: c.pthread_key_t) ?*anyopaque;
pub extern "c" fn pthread_setspecific(key: c.pthread_key_t, value: ?*anyopaque) c_int;
diff --git a/lib/std/c/darwin.zig b/lib/std/c/darwin.zig
index e7fd3cde44..0a65fa5242 100644
--- a/lib/std/c/darwin.zig
+++ b/lib/std/c/darwin.zig
@@ -814,10 +814,10 @@ pub const sigset_t = u32;
pub const empty_sigset: sigset_t = 0;
pub const SIG = struct {
- pub const ERR = @intToPtr(?Sigaction.sigaction_fn, maxInt(usize));
- pub const DFL = @intToPtr(?Sigaction.sigaction_fn, 0);
- pub const IGN = @intToPtr(?Sigaction.sigaction_fn, 1);
- pub const HOLD = @intToPtr(?Sigaction.sigaction_fn, 5);
+ pub const ERR = @intToPtr(?Sigaction.handler_fn, maxInt(usize));
+ pub const DFL = @intToPtr(?Sigaction.handler_fn, 0);
+ pub const IGN = @intToPtr(?Sigaction.handler_fn, 1);
+ pub const HOLD = @intToPtr(?Sigaction.handler_fn, 5);
/// block specified signal set
pub const _BLOCK = 1;
diff --git a/lib/std/c/dragonfly.zig b/lib/std/c/dragonfly.zig
index 1a60f94a1e..5d7822e8e9 100644
--- a/lib/std/c/dragonfly.zig
+++ b/lib/std/c/dragonfly.zig
@@ -609,9 +609,9 @@ pub const S = struct {
pub const BADSIG = SIG.ERR;
pub const SIG = struct {
- pub const DFL = @intToPtr(?Sigaction.sigaction_fn, 0);
- pub const IGN = @intToPtr(?Sigaction.sigaction_fn, 1);
- pub const ERR = @intToPtr(?Sigaction.sigaction_fn, maxInt(usize));
+ pub const DFL = @intToPtr(?Sigaction.handler_fn, 0);
+ pub const IGN = @intToPtr(?Sigaction.handler_fn, 1);
+ pub const ERR = @intToPtr(?Sigaction.handler_fn, maxInt(usize));
pub const BLOCK = 1;
pub const UNBLOCK = 2;
diff --git a/lib/std/c/freebsd.zig b/lib/std/c/freebsd.zig
index dac239094e..6f01886986 100644
--- a/lib/std/c/freebsd.zig
+++ b/lib/std/c/freebsd.zig
@@ -670,9 +670,9 @@ pub const SIG = struct {
pub const UNBLOCK = 2;
pub const SETMASK = 3;
- pub const DFL = @intToPtr(?Sigaction.sigaction_fn, 0);
- pub const IGN = @intToPtr(?Sigaction.sigaction_fn, 1);
- pub const ERR = @intToPtr(?Sigaction.sigaction_fn, maxInt(usize));
+ pub const DFL = @intToPtr(?Sigaction.handler_fn, 0);
+ pub const IGN = @intToPtr(?Sigaction.handler_fn, 1);
+ pub const ERR = @intToPtr(?Sigaction.handler_fn, maxInt(usize));
pub const WORDS = 4;
pub const MAXSIG = 128;
diff --git a/lib/std/c/haiku.zig b/lib/std/c/haiku.zig
index 672f4fa4ba..28935ffa34 100644
--- a/lib/std/c/haiku.zig
+++ b/lib/std/c/haiku.zig
@@ -702,7 +702,7 @@ pub const T = struct {
pub const CSETAF = 0x8002;
pub const CSETAW = 0x8003;
pub const CWAITEVENT = 0x8004;
- pub const CSBRK = 08005;
+ pub const CSBRK = 0x8005;
pub const CFLSH = 0x8006;
pub const CXONC = 0x8007;
pub const CQUERYCONNECTED = 0x8008;
@@ -874,7 +874,7 @@ pub const S = struct {
pub const IFDIR = 0o040000;
pub const IFCHR = 0o020000;
pub const IFIFO = 0o010000;
- pub const INDEX_DIR = 04000000000;
+ pub const INDEX_DIR = 0o4000000000;
pub const IUMSK = 0o7777;
pub const ISUID = 0o4000;
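
Both Haiku fixes in this file are the same bug: Zig has no C-style leading-zero octal, so to the extent older compilers accepted the leading zero at all, these literals were read as decimal (`08005` as 8005, not 0x8005). A comptime sanity check, as a sketch:

    const std = @import("std");

    comptime {
        // Only the 0x, 0o, and 0b prefixes change the base; a bare leading 0 does not.
        std.debug.assert(0x8005 == 32773); // the old `08005` meant decimal 8005
        std.debug.assert(0o4000000000 == 0x20000000);
    }
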
diff --git a/lib/std/c/netbsd.zig b/lib/std/c/netbsd.zig
index 3de14da7a2..a8287033d7 100644
--- a/lib/std/c/netbsd.zig
+++ b/lib/std/c/netbsd.zig
@@ -910,9 +910,9 @@ pub const winsize = extern struct {
const NSIG = 32;
pub const SIG = struct {
- pub const DFL = @intToPtr(?Sigaction.sigaction_fn, 0);
- pub const IGN = @intToPtr(?Sigaction.sigaction_fn, 1);
- pub const ERR = @intToPtr(?Sigaction.sigaction_fn, maxInt(usize));
+ pub const DFL = @intToPtr(?Sigaction.handler_fn, 0);
+ pub const IGN = @intToPtr(?Sigaction.handler_fn, 1);
+ pub const ERR = @intToPtr(?Sigaction.handler_fn, maxInt(usize));
pub const WORDS = 4;
pub const MAXSIG = 128;
diff --git a/lib/std/c/openbsd.zig b/lib/std/c/openbsd.zig
index 0863cc5a5e..ddcb24ffd3 100644
--- a/lib/std/c/openbsd.zig
+++ b/lib/std/c/openbsd.zig
@@ -982,11 +982,11 @@ pub const winsize = extern struct {
const NSIG = 33;
pub const SIG = struct {
- pub const DFL = @intToPtr(?Sigaction.sigaction_fn, 0);
- pub const IGN = @intToPtr(?Sigaction.sigaction_fn, 1);
- pub const ERR = @intToPtr(?Sigaction.sigaction_fn, maxInt(usize));
- pub const CATCH = @intToPtr(?Sigaction.sigaction_fn, 2);
- pub const HOLD = @intToPtr(?Sigaction.sigaction_fn, 3);
+ pub const DFL = @intToPtr(?Sigaction.handler_fn, 0);
+ pub const IGN = @intToPtr(?Sigaction.handler_fn, 1);
+ pub const ERR = @intToPtr(?Sigaction.handler_fn, maxInt(usize));
+ pub const CATCH = @intToPtr(?Sigaction.handler_fn, 2);
+ pub const HOLD = @intToPtr(?Sigaction.handler_fn, 3);
pub const HUP = 1;
pub const INT = 2;
@@ -1119,26 +1119,11 @@ pub usingnamespace switch (builtin.cpu.arch) {
sc_rsp: c_long,
sc_ss: c_long,
- sc_fpstate: fxsave64,
+ sc_fpstate: *anyopaque, // struct fxsave64 *
__sc_unused: c_int,
sc_mask: c_int,
sc_cookie: c_long,
};
-
- pub const fxsave64 = packed struct {
- fx_fcw: u16,
- fx_fsw: u16,
- fx_ftw: u8,
- fx_unused1: u8,
- fx_fop: u16,
- fx_rip: u64,
- fx_rdp: u64,
- fx_mxcsr: u32,
- fx_mxcsr_mask: u32,
- fx_st: [8][2]u64,
- fx_xmm: [16][2]u64,
- fx_unused3: [96]u8,
- };
},
else => struct {},
};
diff --git a/lib/std/c/solaris.zig b/lib/std/c/solaris.zig
index 61e52cfe30..1e726beb75 100644
--- a/lib/std/c/solaris.zig
+++ b/lib/std/c/solaris.zig
@@ -879,10 +879,10 @@ pub const winsize = extern struct {
const NSIG = 75;
pub const SIG = struct {
- pub const DFL = @intToPtr(?Sigaction.sigaction_fn, 0);
- pub const ERR = @intToPtr(?Sigaction.sigaction_fn, maxInt(usize));
- pub const IGN = @intToPtr(?Sigaction.sigaction_fn, 1);
- pub const HOLD = @intToPtr(?Sigaction.sigaction_fn, 2);
+ pub const DFL = @intToPtr(?Sigaction.handler_fn, 0);
+ pub const ERR = @intToPtr(?Sigaction.handler_fn, maxInt(usize));
+ pub const IGN = @intToPtr(?Sigaction.handler_fn, 1);
+ pub const HOLD = @intToPtr(?Sigaction.handler_fn, 2);
pub const WORDS = 4;
pub const MAXSIG = 75;
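
The `sigaction_fn` -> `handler_fn` rename repeats across darwin, dragonfly, freebsd, netbsd, openbsd, and solaris for the same reason: SIG.DFL, SIG.IGN, SIG.ERR, and friends are plain one-argument handlers, so they belong in the `handler` arm of the `Sigaction` handler union, not the three-argument `sigaction` arm. A sketch of where the constants now slot in (field names follow the std.os wrappers of this era; the exact `Sigaction` layout varies by target):

    const std = @import("std");
    const os = std.os;

    fn makeIgnoreAction() os.Sigaction {
        return .{
            // SIG.IGN is now typed ?Sigaction.handler_fn, matching this arm.
            .handler = .{ .handler = os.SIG.IGN },
            .mask = os.empty_sigset,
            .flags = 0,
        };
    }
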
diff --git a/lib/std/coff.zig b/lib/std/coff.zig
index 7c077a9ec8..f9de318e7a 100644
--- a/lib/std/coff.zig
+++ b/lib/std/coff.zig
@@ -1,14 +1,731 @@
const std = @import("std.zig");
+const assert = std.debug.assert;
const io = std.io;
const mem = std.mem;
const os = std.os;
-const File = std.fs.File;
+const fs = std.fs;
-// CoffHeader.machine values
-// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms680313(v=vs.85).aspx
-const IMAGE_FILE_MACHINE_I386 = 0x014c;
-const IMAGE_FILE_MACHINE_IA64 = 0x0200;
-const IMAGE_FILE_MACHINE_AMD64 = 0x8664;
+pub const CoffHeaderFlags = packed struct {
+ /// Image only, Windows CE, and Microsoft Windows NT and later.
+ /// This indicates that the file does not contain base relocations
+ /// and must therefore be loaded at its preferred base address.
+ /// If the base address is not available, the loader reports an error.
+ /// The default behavior of the linker is to strip base relocations
+ /// from executable (EXE) files.
+ RELOCS_STRIPPED: u1 = 0,
+
+ /// Image only. This indicates that the image file is valid and can be run.
+ /// If this flag is not set, it indicates a linker error.
+ EXECUTABLE_IMAGE: u1 = 0,
+
+ /// COFF line numbers have been removed. This flag is deprecated and should be zero.
+ LINE_NUMS_STRIPPED: u1 = 0,
+
+ /// COFF symbol table entries for local symbols have been removed.
+ /// This flag is deprecated and should be zero.
+ LOCAL_SYMS_STRIPPED: u1 = 0,
+
+ /// Obsolete. Aggressively trim working set.
+ /// This flag is deprecated for Windows 2000 and later and must be zero.
+ AGGRESSIVE_WS_TRIM: u1 = 0,
+
+ /// Application can handle > 2-GB addresses.
+ LARGE_ADDRESS_AWARE: u1 = 0,
+
+ /// This flag is reserved for future use.
+ RESERVED: u1 = 0,
+
+ /// Little endian: the least significant bit (LSB) precedes the
+ /// most significant bit (MSB) in memory. This flag is deprecated and should be zero.
+ BYTES_REVERSED_LO: u1 = 0,
+
+ /// Machine is based on a 32-bit-word architecture.
+ @"32BIT_MACHINE": u1 = 0,
+
+ /// Debugging information is removed from the image file.
+ DEBUG_STRIPPED: u1 = 0,
+
+ /// If the image is on removable media, fully load it and copy it to the swap file.
+ REMOVABLE_RUN_FROM_SWAP: u1 = 0,
+
+ /// If the image is on network media, fully load it and copy it to the swap file.
+ NET_RUN_FROM_SWAP: u1 = 0,
+
+ /// The image file is a system file, not a user program.
+ SYSTEM: u1 = 0,
+
+ /// The image file is a dynamic-link library (DLL).
+ /// Such files are considered executable files for almost all purposes,
+ /// although they cannot be directly run.
+ DLL: u1 = 0,
+
+ /// The file should be run only on a uniprocessor machine.
+ UP_SYSTEM_ONLY: u1 = 0,
+
+ /// Big endian: the MSB precedes the LSB in memory. This flag is deprecated and should be zero.
+ BYTES_REVERSED_HI: u1 = 0,
+};
+
+pub const CoffHeader = extern struct {
+ /// The number that identifies the type of target machine.
+ machine: MachineType,
+
+ /// The number of sections. This indicates the size of the section table, which immediately follows the headers.
+ number_of_sections: u16,
+
+ /// The low 32 bits of the number of seconds since 00:00 January 1, 1970 (a C run-time time_t value),
+ /// which indicates when the file was created.
+ time_date_stamp: u32,
+
+ /// The file offset of the COFF symbol table, or zero if no COFF symbol table is present.
+ /// This value should be zero for an image because COFF debugging information is deprecated.
+ pointer_to_symbol_table: u32,
+
+ /// The number of entries in the symbol table.
+ /// This data can be used to locate the string table, which immediately follows the symbol table.
+ /// This value should be zero for an image because COFF debugging information is deprecated.
+ number_of_symbols: u32,
+
+ /// The size of the optional header, which is required for executable files but not for object files.
+ /// This value should be zero for an object file. For a description of the header format, see Optional Header (Image Only).
+ size_of_optional_header: u16,
+
+ /// The flags that indicate the attributes of the file.
+ flags: CoffHeaderFlags,
+};
+
+// OptionalHeader.magic values
+// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms680339(v=vs.85).aspx
+pub const IMAGE_NT_OPTIONAL_HDR32_MAGIC = 0x10b;
+pub const IMAGE_NT_OPTIONAL_HDR64_MAGIC = 0x20b;
+
+pub const DllFlags = packed struct {
+ _reserved_0: u5 = 0,
+
+ /// Image can handle a high entropy 64-bit virtual address space.
+ HIGH_ENTROPY_VA: u1 = 0,
+
+ /// DLL can be relocated at load time.
+ DYNAMIC_BASE: u1 = 0,
+
+ /// Code Integrity checks are enforced.
+ FORCE_INTEGRITY: u1 = 0,
+
+ /// Image is NX compatible.
+ NX_COMPAT: u1 = 0,
+
+ /// Isolation aware, but do not isolate the image.
+ NO_ISOLATION: u1 = 0,
+
+ /// Does not use structured exception (SE) handling. No SE handler may be called in this image.
+ NO_SEH: u1 = 0,
+
+ /// Do not bind the image.
+ NO_BIND: u1 = 0,
+
+ /// Image must execute in an AppContainer.
+ APPCONTAINER: u1 = 0,
+
+ /// A WDM driver.
+ WDM_DRIVER: u1 = 0,
+
+ /// Image supports Control Flow Guard.
+ GUARD_CF: u1 = 0,
+
+ /// Terminal Server aware.
+ TERMINAL_SERVER_AWARE: u1 = 0,
+};
+
+pub const Subsystem = enum(u16) {
+ /// An unknown subsystem
+ UNKNOWN = 0,
+
+ /// Device drivers and native Windows processes
+ NATIVE = 1,
+
+ /// The Windows graphical user interface (GUI) subsystem
+ WINDOWS_GUI = 2,
+
+ /// The Windows character subsystem
+ WINDOWS_CUI = 3,
+
+ /// The OS/2 character subsystem
+ OS2_CUI = 5,
+
+ /// The Posix character subsystem
+ POSIX_CUI = 7,
+
+ /// Native Win9x driver
+ NATIVE_WINDOWS = 8,
+
+ /// Windows CE
+ WINDOWS_CE_GUI = 9,
+
+ /// An Extensible Firmware Interface (EFI) application
+ EFI_APPLICATION = 10,
+
+ /// An EFI driver with boot services
+ EFI_BOOT_SERVICE_DRIVER = 11,
+
+ /// An EFI driver with run-time services
+ EFI_RUNTIME_DRIVER = 12,
+
+ /// An EFI ROM image
+ EFI_ROM = 13,
+
+ /// XBOX
+ XBOX = 14,
+
+ /// Windows boot application
+ WINDOWS_BOOT_APPLICATION = 16,
+};
+
+pub const OptionalHeader = extern struct {
+ magic: u16,
+ major_linker_version: u8,
+ minor_linker_version: u8,
+ size_of_code: u32,
+ size_of_initialized_data: u32,
+ size_of_uninitialized_data: u32,
+ address_of_entry_point: u32,
+ base_of_code: u32,
+};
+
+pub const OptionalHeaderPE32 = extern struct {
+ magic: u16,
+ major_linker_version: u8,
+ minor_linker_version: u8,
+ size_of_code: u32,
+ size_of_initialized_data: u32,
+ size_of_uninitialized_data: u32,
+ address_of_entry_point: u32,
+ base_of_code: u32,
+ base_of_data: u32,
+ image_base: u32,
+ section_alignment: u32,
+ file_alignment: u32,
+ major_operating_system_version: u16,
+ minor_operating_system_version: u16,
+ major_image_version: u16,
+ minor_image_version: u16,
+ major_subsystem_version: u16,
+ minor_subsystem_version: u16,
+ win32_version_value: u32,
+ size_of_image: u32,
+ size_of_headers: u32,
+ checksum: u32,
+ subsystem: Subsystem,
+ dll_flags: DllFlags,
+ size_of_stack_reserve: u32,
+ size_of_stack_commit: u32,
+ size_of_heap_reserve: u32,
+ size_of_heap_commit: u32,
+ loader_flags: u32,
+ number_of_rva_and_sizes: u32,
+};
+
+pub const OptionalHeaderPE64 = extern struct {
+ magic: u16,
+ major_linker_version: u8,
+ minor_linker_version: u8,
+ size_of_code: u32,
+ size_of_initialized_data: u32,
+ size_of_uninitialized_data: u32,
+ address_of_entry_point: u32,
+ base_of_code: u32,
+ image_base: u64,
+ section_alignment: u32,
+ file_alignment: u32,
+ major_operating_system_version: u16,
+ minor_operating_system_version: u16,
+ major_image_version: u16,
+ minor_image_version: u16,
+ major_subsystem_version: u16,
+ minor_subsystem_version: u16,
+ win32_version_value: u32,
+ size_of_image: u32,
+ size_of_headers: u32,
+ checksum: u32,
+ subsystem: Subsystem,
+ dll_flags: DllFlags,
+ size_of_stack_reserve: u64,
+ size_of_stack_commit: u64,
+ size_of_heap_reserve: u64,
+ size_of_heap_commit: u64,
+ loader_flags: u32,
+ number_of_rva_and_sizes: u32,
+};
+
+pub const DebugDirectoryEntry = extern struct {
+ characteristics: u32,
+ time_date_stamp: u32,
+ major_version: u16,
+ minor_version: u16,
+ @"type": u32,
+ size_of_data: u32,
+ address_of_raw_data: u32,
+ pointer_to_raw_data: u32,
+};
+
+pub const ImageDataDirectory = extern struct {
+ virtual_address: u32,
+ size: u32,
+};
+
+pub const SectionHeader = extern struct {
+ name: [8]u8,
+ virtual_size: u32,
+ virtual_address: u32,
+ size_of_raw_data: u32,
+ pointer_to_raw_data: u32,
+ pointer_to_relocations: u32,
+ pointer_to_linenumbers: u32,
+ number_of_relocations: u16,
+ number_of_linenumbers: u16,
+ flags: SectionHeaderFlags,
+
+ pub fn getName(self: *align(1) const SectionHeader) ?[]const u8 {
+ if (self.name[0] == '/') return null;
+ const len = std.mem.indexOfScalar(u8, &self.name, @as(u8, 0)) orelse self.name.len;
+ return self.name[0..len];
+ }
+
+ pub fn getNameOffset(self: SectionHeader) ?u32 {
+ if (self.name[0] != '/') return null;
+ const len = std.mem.indexOfScalar(u8, &self.name, @as(u8, 0)) orelse self.name.len;
+ const offset = std.fmt.parseInt(u32, self.name[1..len], 10) catch unreachable;
+ return offset;
+ }
+
+ /// Applicable only to section headers in COFF objects.
+ pub fn getAlignment(self: SectionHeader) ?u16 {
+ if (self.flags.ALIGN == 0) return null;
+ return std.math.powi(u16, 2, self.flags.ALIGN - 1) catch unreachable;
+ }
+
+ pub fn isComdat(self: SectionHeader) bool {
+ return self.flags.LNK_COMDAT == 0b1;
+ }
+};
+
+pub const SectionHeaderFlags = packed struct {
+ _reserved_0: u3 = 0,
+
+ /// The section should not be padded to the next boundary.
+ /// This flag is obsolete and is replaced by IMAGE_SCN_ALIGN_1BYTES.
+ /// This is valid only for object files.
+ TYPE_NO_PAD: u1 = 0,
+
+ _reserved_1: u1 = 0,
+
+ /// The section contains executable code.
+ CNT_CODE: u1 = 0,
+
+ /// The section contains initialized data.
+ CNT_INITIALIZED_DATA: u1 = 0,
+
+ /// The section contains uninitialized data.
+ CNT_UNINITIALIZED_DATA: u1 = 0,
+
+ /// Reserved for future use.
+ LNK_OTHER: u1 = 0,
+
+ /// The section contains comments or other information.
+ /// The .drectve section has this type.
+ /// This is valid for object files only.
+ LNK_INFO: u1 = 0,
+
+ _reserved_2: u1 = 0,
+
+ /// The section will not become part of the image.
+ /// This is valid only for object files.
+ LNK_REMOVE: u1 = 0,
+
+ /// The section contains COMDAT data.
+ /// For more information, see COMDAT Sections (Object Only).
+ /// This is valid only for object files.
+ LNK_COMDAT: u1 = 0,
+
+ _reserved_3: u2 = 0,
+
+ /// The section contains data referenced through the global pointer (GP).
+ GPREL: u1 = 0,
+
+ /// Reserved for future use.
+ MEM_PURGEABLE: u1 = 0,
+
+ /// Reserved for future use.
+ MEM_16BIT: u1 = 0,
+
+ /// Reserved for future use.
+ MEM_LOCKED: u1 = 0,
+
+ /// Reserved for future use.
+ MEM_PRELOAD: u1 = 0,
+
+ /// Takes on multiple values according to flags:
+ /// pub const IMAGE_SCN_ALIGN_1BYTES: u32 = 0x100000;
+ /// pub const IMAGE_SCN_ALIGN_2BYTES: u32 = 0x200000;
+ /// pub const IMAGE_SCN_ALIGN_4BYTES: u32 = 0x300000;
+ /// pub const IMAGE_SCN_ALIGN_8BYTES: u32 = 0x400000;
+ /// pub const IMAGE_SCN_ALIGN_16BYTES: u32 = 0x500000;
+ /// pub const IMAGE_SCN_ALIGN_32BYTES: u32 = 0x600000;
+ /// pub const IMAGE_SCN_ALIGN_64BYTES: u32 = 0x700000;
+ /// pub const IMAGE_SCN_ALIGN_128BYTES: u32 = 0x800000;
+ /// pub const IMAGE_SCN_ALIGN_256BYTES: u32 = 0x900000;
+ /// pub const IMAGE_SCN_ALIGN_512BYTES: u32 = 0xA00000;
+ /// pub const IMAGE_SCN_ALIGN_1024BYTES: u32 = 0xB00000;
+ /// pub const IMAGE_SCN_ALIGN_2048BYTES: u32 = 0xC00000;
+ /// pub const IMAGE_SCN_ALIGN_4096BYTES: u32 = 0xD00000;
+ /// pub const IMAGE_SCN_ALIGN_8192BYTES: u32 = 0xE00000;
+ ALIGN: u4 = 0,
+
+ /// The section contains extended relocations.
+ LNK_NRELOC_OVFL: u1 = 0,
+
+ /// The section can be discarded as needed.
+ MEM_DISCARDABLE: u1 = 0,
+
+ /// The section cannot be cached.
+ MEM_NOT_CACHED: u1 = 0,
+
+ /// The section is not pageable.
+ MEM_NOT_PAGED: u1 = 0,
+
+ /// The section can be shared in memory.
+ MEM_SHARED: u1 = 0,
+
+ /// The section can be executed as code.
+ MEM_EXECUTE: u1 = 0,
+
+ /// The section can be read.
+ MEM_READ: u1 = 0,
+
+ /// The section can be written to.
+ MEM_WRITE: u1 = 0,
+};
+
+pub const Symbol = struct {
+ name: [8]u8,
+ value: u32,
+ section_number: SectionNumber,
+ @"type": SymType,
+ storage_class: StorageClass,
+ number_of_aux_symbols: u8,
+
+ pub fn sizeOf() usize {
+ return 18;
+ }
+
+ pub fn getName(self: *const Symbol) ?[]const u8 {
+ if (std.mem.eql(u8, self.name[0..4], "\x00\x00\x00\x00")) return null;
+ const len = std.mem.indexOfScalar(u8, &self.name, @as(u8, 0)) orelse self.name.len;
+ return self.name[0..len];
+ }
+
+ pub fn getNameOffset(self: Symbol) ?u32 {
+ if (!std.mem.eql(u8, self.name[0..4], "\x00\x00\x00\x00")) return null;
+ const offset = std.mem.readIntLittle(u32, self.name[4..8]);
+ return offset;
+ }
+};
+
+pub const SectionNumber = enum(u16) {
+ /// The symbol record is not yet assigned a section.
+ /// A value of zero indicates that a reference to an external symbol is defined elsewhere.
+ /// A value of non-zero is a common symbol with a size that is specified by the value.
+ UNDEFINED = 0,
+
+ /// The symbol has an absolute (non-relocatable) value and is not an address.
+ ABSOLUTE = 0xffff,
+
+ /// The symbol provides general type or debugging information but does not correspond to a section.
+ /// Microsoft tools use this setting along with .file records (storage class FILE).
+ DEBUG = 0xfffe,
+ _,
+};
+
+pub const SymType = packed struct {
+ complex_type: ComplexType,
+ base_type: BaseType,
+};
+
+pub const BaseType = enum(u8) {
+ /// No type information or unknown base type. Microsoft tools use this setting.
+ NULL = 0,
+
+ /// No valid type; used with void pointers and functions
+ VOID = 1,
+
+ /// A character (signed byte)
+ CHAR = 2,
+
+ /// A 2-byte signed integer
+ SHORT = 3,
+
+ /// A natural integer type (normally 4 bytes in Windows)
+ INT = 4,
+
+ /// A 4-byte signed integer
+ LONG = 5,
+
+ /// A 4-byte floating-point number
+ FLOAT = 6,
+
+ /// An 8-byte floating-point number
+ DOUBLE = 7,
+
+ /// A structure
+ STRUCT = 8,
+
+ /// A union
+ UNION = 9,
+
+ /// An enumerated type
+ ENUM = 10,
+
+ /// A member of enumeration (a specified value)
+ MOE = 11,
+
+ /// A byte; unsigned 1-byte integer
+ BYTE = 12,
+
+ /// A word; unsigned 2-byte integer
+ WORD = 13,
+
+ /// An unsigned integer of natural size (normally, 4 bytes)
+ UINT = 14,
+
+ /// An unsigned 4-byte integer
+ DWORD = 15,
+};
+
+pub const ComplexType = enum(u8) {
+ /// No derived type; the symbol is a simple scalar variable.
+ NULL = 0,
+
+ /// The symbol is a pointer to base type.
+ POINTER = 16,
+
+ /// The symbol is a function that returns a base type.
+ FUNCTION = 32,
+
+ /// The symbol is an array of base type.
+ ARRAY = 48,
+};
+
+pub const StorageClass = enum(u8) {
+ /// A special symbol that represents the end of function, for debugging purposes.
+ END_OF_FUNCTION = 0xff,
+
+ /// No assigned storage class.
+ NULL = 0,
+
+ /// The automatic (stack) variable. The Value field specifies the stack frame offset.
+ AUTOMATIC = 1,
+
+ /// A value that Microsoft tools use for external symbols.
+ /// The Value field indicates the size if the section number is IMAGE_SYM_UNDEFINED (0).
+ /// If the section number is not zero, then the Value field specifies the offset within the section.
+ EXTERNAL = 2,
+
+ /// The offset of the symbol within the section.
+ /// If the Value field is zero, then the symbol represents a section name.
+ STATIC = 3,
+
+ /// A register variable.
+ /// The Value field specifies the register number.
+ REGISTER = 4,
+
+ /// A symbol that is defined externally.
+ EXTERNAL_DEF = 5,
+
+ /// A code label that is defined within the module.
+ /// The Value field specifies the offset of the symbol within the section.
+ LABEL = 6,
+
+ /// A reference to a code label that is not defined.
+ UNDEFINED_LABEL = 7,
+
+ /// The structure member. The Value field specifies the nth member.
+ MEMBER_OF_STRUCT = 8,
+
+ /// A formal argument (parameter) of a function. The Value field specifies the nth argument.
+ ARGUMENT = 9,
+
+ /// The structure tag-name entry.
+ STRUCT_TAG = 10,
+
+ /// A union member. The Value field specifies the nth member.
+ MEMBER_OF_UNION = 11,
+
+ /// The Union tag-name entry.
+ UNION_TAG = 12,
+
+ /// A Typedef entry.
+ TYPE_DEFINITION = 13,
+
+ /// A static data declaration.
+ UNDEFINED_STATIC = 14,
+
+ /// An enumerated type tagname entry.
+ ENUM_TAG = 15,
+
+ /// A member of an enumeration. The Value field specifies the nth member.
+ MEMBER_OF_ENUM = 16,
+
+ /// A register parameter.
+ REGISTER_PARAM = 17,
+
+ /// A bit-field reference. The Value field specifies the nth bit in the bit field.
+ BIT_FIELD = 18,
+
+ /// A .bb (beginning of block) or .eb (end of block) record.
+ /// The Value field is the relocatable address of the code location.
+ BLOCK = 100,
+
+ /// A value that Microsoft tools use for symbol records that define the extent of a function: begin function (.bf), end function (.ef), and lines in function (.lf).
+ /// For .lf records, the Value field gives the number of source lines in the function.
+ /// For .ef records, the Value field gives the size of the function code.
+ FUNCTION = 101,
+
+ /// An end-of-structure entry.
+ END_OF_STRUCT = 102,
+
+ /// A value that Microsoft tools, as well as the traditional COFF format, use for the source-file symbol record.
+ /// The symbol is followed by auxiliary records that name the file.
+ FILE = 103,
+
+ /// A definition of a section (Microsoft tools use STATIC storage class instead).
+ SECTION = 104,
+
+ /// A weak external. For more information, see Auxiliary Format 3: Weak Externals.
+ WEAK_EXTERNAL = 105,
+
+ /// A CLR token symbol. The name is an ASCII string that consists of the hexadecimal value of the token.
+ /// For more information, see CLR Token Definition (Object Only).
+ CLR_TOKEN = 107,
+};
+
+pub const FunctionDefinition = struct {
+ /// The symbol-table index of the corresponding .bf (begin function) symbol record.
+ tag_index: u32,
+
+ /// The size of the executable code for the function itself.
+ /// If the function is in its own section, the SizeOfRawData in the section header is greater than or equal to this field,
+ /// depending on alignment considerations.
+ total_size: u32,
+
+ /// The file offset of the first COFF line-number entry for the function, or zero if none exists.
+ pointer_to_linenumber: u32,
+
+ /// The symbol-table index of the record for the next function.
+ /// If the function is the last in the symbol table, this field is set to zero.
+ pointer_to_next_function: u32,
+
+ unused: [2]u8,
+};
+
+pub const SectionDefinition = struct {
+ /// The size of section data; the same as SizeOfRawData in the section header.
+ length: u32,
+
+ /// The number of relocation entries for the section.
+ number_of_relocations: u16,
+
+ /// The number of line-number entries for the section.
+ number_of_linenumbers: u16,
+
+ /// The checksum for communal data. It is applicable if the IMAGE_SCN_LNK_COMDAT flag is set in the section header.
+ checksum: u32,
+
+ /// One-based index into the section table for the associated section. This is used when the COMDAT selection setting is 5.
+ number: u16,
+
+ /// The COMDAT selection number. This is applicable if the section is a COMDAT section.
+ selection: ComdatSelection,
+
+ unused: [3]u8,
+};
+
+pub const FileDefinition = struct {
+ /// An ANSI string that gives the name of the source file.
+ /// This is padded with nulls if it is less than the maximum length.
+ file_name: [18]u8,
+
+ pub fn getFileName(self: *const FileDefinition) []const u8 {
+ const len = std.mem.indexOfScalar(u8, &self.file_name, @as(u8, 0)) orelse self.file_name.len;
+ return self.file_name[0..len];
+ }
+};
+
+pub const WeakExternalDefinition = struct {
+ /// The symbol-table index of sym2, the symbol to be linked if sym1 is not found.
+ tag_index: u32,
+
+ /// A value of IMAGE_WEAK_EXTERN_SEARCH_NOLIBRARY indicates that no library search for sym1 should be performed.
+ /// A value of IMAGE_WEAK_EXTERN_SEARCH_LIBRARY indicates that a library search for sym1 should be performed.
+ /// A value of IMAGE_WEAK_EXTERN_SEARCH_ALIAS indicates that sym1 is an alias for sym2.
+ flag: WeakExternalFlag,
+
+ unused: [10]u8,
+};
+
+// https://github.com/tpn/winsdk-10/blob/master/Include/10.0.16299.0/km/ntimage.h
+pub const WeakExternalFlag = enum(u32) {
+ SEARCH_NOLIBRARY = 1,
+ SEARCH_LIBRARY = 2,
+ SEARCH_ALIAS = 3,
+ ANTI_DEPENDENCY = 4,
+};
+
+pub const ComdatSelection = enum(u8) {
+ /// Not a COMDAT section.
+ NONE = 0,
+
+ /// If this symbol is already defined, the linker issues a "multiply defined symbol" error.
+ NODUPLICATES = 1,
+
+ /// Any section that defines the same COMDAT symbol can be linked; the rest are removed.
+ ANY = 2,
+
+ /// The linker chooses an arbitrary section among the definitions for this symbol.
+ /// If all definitions are not the same size, a "multiply defined symbol" error is issued.
+ SAME_SIZE = 3,
+
+ /// The linker chooses an arbitrary section among the definitions for this symbol.
+ /// If all definitions do not match exactly, a "multiply defined symbol" error is issued.
+ EXACT_MATCH = 4,
+
+ /// The section is linked if a certain other COMDAT section is linked.
+ /// This other section is indicated by the Number field of the auxiliary symbol record for the section definition.
+ /// This setting is useful for definitions that have components in multiple sections
+ /// (for example, code in one and data in another), but where all must be linked or discarded as a set.
+ /// The other section this section is associated with must be a COMDAT section, which can be another
+ /// associative COMDAT section. An associative COMDAT section's section association chain can't form a loop.
+ /// The section association chain must eventually come to a COMDAT section that doesn't have IMAGE_COMDAT_SELECT_ASSOCIATIVE set.
+ ASSOCIATIVE = 5,
+
+ /// The linker chooses the largest definition from among all of the definitions for this symbol.
+ /// If multiple definitions have this size, the choice between them is arbitrary.
+ LARGEST = 6,
+};
+
+pub const DebugInfoDefinition = struct {
+ unused_1: [4]u8,
+
+ /// The actual ordinal line number (1, 2, 3, and so on) within the source file, corresponding to the .bf or .ef record.
+ linenumber: u16,
+
+ unused_2: [6]u8,
+
+ /// The symbol-table index of the next .bf symbol record.
+ /// If the function is the last in the symbol table, this field is set to zero.
+ /// It is not used for .ef records.
+ pointer_to_next_function: u32,
+
+ unused_3: [2]u8,
+};
pub const MachineType = enum(u16) {
Unknown = 0x0,
@@ -77,25 +794,6 @@ pub const MachineType = enum(u16) {
}
};
-// OptionalHeader.magic values
-// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms680339(v=vs.85).aspx
-const IMAGE_NT_OPTIONAL_HDR32_MAGIC = 0x10b;
-const IMAGE_NT_OPTIONAL_HDR64_MAGIC = 0x20b;
-
-// Image Characteristics
-pub const IMAGE_FILE_RELOCS_STRIPPED = 0x1;
-pub const IMAGE_FILE_DEBUG_STRIPPED = 0x200;
-pub const IMAGE_FILE_EXECUTABLE_IMAGE = 0x2;
-pub const IMAGE_FILE_32BIT_MACHINE = 0x100;
-pub const IMAGE_FILE_LARGE_ADDRESS_AWARE = 0x20;
-
-// Section flags
-pub const IMAGE_SCN_CNT_INITIALIZED_DATA = 0x40;
-pub const IMAGE_SCN_MEM_READ = 0x40000000;
-pub const IMAGE_SCN_CNT_CODE = 0x20;
-pub const IMAGE_SCN_MEM_EXECUTE = 0x20000000;
-pub const IMAGE_SCN_MEM_WRITE = 0x80000000;
-
const IMAGE_NUMBEROF_DIRECTORY_ENTRIES = 16;
const IMAGE_DEBUG_TYPE_CODEVIEW = 2;
const DEBUG_DIRECTORY = 6;
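
`DebugDirectoryEntry` moves from `packed struct` (the old definition is removed further down) to `extern struct`, since it mirrors the 28-byte on-disk IMAGE_DEBUG_DIRECTORY record and `getPdbPath` divides the directory size by `@sizeOf(DebugDirectoryEntry)`. A comptime check, as a sketch:

    const std = @import("std");

    comptime {
        // 4 + 4 + 2 + 2 + 4 + 4 + 4 + 4 bytes, with no padding under extern layout.
        std.debug.assert(@sizeOf(std.coff.DebugDirectoryEntry) == 28);
    }
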
@@ -104,166 +802,87 @@ pub const CoffError = error{
InvalidPEMagic,
InvalidPEHeader,
InvalidMachine,
+ MissingPEHeader,
MissingCoffSection,
MissingStringTable,
};
// Official documentation of the format: https://docs.microsoft.com/en-us/windows/win32/debug/pe-format
pub const Coff = struct {
- in_file: File,
allocator: mem.Allocator,
+ data: []const u8 = undefined,
+ is_image: bool = false,
+ coff_header_offset: usize = 0,
- coff_header: CoffHeader,
- pe_header: OptionalHeader,
- sections: std.ArrayListUnmanaged(Section) = .{},
-
- guid: [16]u8,
- age: u32,
-
- pub fn init(allocator: mem.Allocator, in_file: File) Coff {
- return Coff{
- .in_file = in_file,
- .allocator = allocator,
- .coff_header = undefined,
- .pe_header = undefined,
- .guid = undefined,
- .age = undefined,
- };
- }
+ guid: [16]u8 = undefined,
+ age: u32 = undefined,
pub fn deinit(self: *Coff) void {
- self.sections.deinit(self.allocator);
+ self.allocator.free(self.data);
}
- pub fn loadHeader(self: *Coff) !void {
+ /// Takes ownership of `data`.
+ pub fn parse(self: *Coff, data: []const u8) !void {
+ self.data = data;
+
const pe_pointer_offset = 0x3C;
+ const pe_magic = "PE\x00\x00";
- const in = self.in_file.reader();
+ var stream = std.io.fixedBufferStream(self.data);
+ const reader = stream.reader();
+ try stream.seekTo(pe_pointer_offset);
+ const coff_header_offset = try reader.readIntLittle(u32); // e_lfanew is a 4-byte little-endian offset
+ try stream.seekTo(coff_header_offset);
+ var buf: [4]u8 = undefined;
+ try reader.readNoEof(&buf);
+ self.is_image = mem.eql(u8, pe_magic, &buf);
- var magic: [2]u8 = undefined;
- try in.readNoEof(magic[0..]);
- if (!mem.eql(u8, &magic, "MZ"))
- return error.InvalidPEMagic;
-
- // Seek to PE File Header (coff header)
- try self.in_file.seekTo(pe_pointer_offset);
- const pe_magic_offset = try in.readIntLittle(u32);
- try self.in_file.seekTo(pe_magic_offset);
-
- var pe_header_magic: [4]u8 = undefined;
- try in.readNoEof(pe_header_magic[0..]);
- if (!mem.eql(u8, &pe_header_magic, &[_]u8{ 'P', 'E', 0, 0 }))
- return error.InvalidPEHeader;
-
- self.coff_header = CoffHeader{
- .machine = try in.readIntLittle(u16),
- .number_of_sections = try in.readIntLittle(u16),
- .timedate_stamp = try in.readIntLittle(u32),
- .pointer_to_symbol_table = try in.readIntLittle(u32),
- .number_of_symbols = try in.readIntLittle(u32),
- .size_of_optional_header = try in.readIntLittle(u16),
- .characteristics = try in.readIntLittle(u16),
- };
-
- switch (self.coff_header.machine) {
- IMAGE_FILE_MACHINE_I386, IMAGE_FILE_MACHINE_AMD64, IMAGE_FILE_MACHINE_IA64 => {},
- else => return error.InvalidMachine,
+ // Do some basic validation upfront
+ if (self.is_image) {
+ self.coff_header_offset = coff_header_offset + 4;
+ const coff_header = self.getCoffHeader();
+ if (coff_header.size_of_optional_header == 0) return error.MissingPEHeader;
}
- try self.loadOptionalHeader();
- }
-
- fn readStringFromTable(self: *Coff, offset: usize, buf: []u8) ![]const u8 {
- if (self.coff_header.pointer_to_symbol_table == 0) {
- // No symbol table therefore no string table
- return error.MissingStringTable;
- }
- // The string table is at the end of the symbol table and symbols are 18 bytes long
- const string_table_offset = self.coff_header.pointer_to_symbol_table + (self.coff_header.number_of_symbols * 18) + offset;
- const in = self.in_file.reader();
- const old_pos = try self.in_file.getPos();
-
- try self.in_file.seekTo(string_table_offset);
- defer {
- self.in_file.seekTo(old_pos) catch unreachable;
- }
-
- const str = try in.readUntilDelimiterOrEof(buf, 0);
- return str orelse "";
- }
-
- fn loadOptionalHeader(self: *Coff) !void {
- const in = self.in_file.reader();
- const opt_header_pos = try self.in_file.getPos();
-
- self.pe_header.magic = try in.readIntLittle(u16);
- try self.in_file.seekTo(opt_header_pos + 16);
- self.pe_header.entry_addr = try in.readIntLittle(u32);
- try self.in_file.seekTo(opt_header_pos + 20);
- self.pe_header.code_base = try in.readIntLittle(u32);
-
- // The header structure is different for 32 or 64 bit
- var num_rva_pos: u64 = undefined;
- if (self.pe_header.magic == IMAGE_NT_OPTIONAL_HDR32_MAGIC) {
- num_rva_pos = opt_header_pos + 92;
-
- try self.in_file.seekTo(opt_header_pos + 28);
- const image_base32 = try in.readIntLittle(u32);
- self.pe_header.image_base = image_base32;
- } else if (self.pe_header.magic == IMAGE_NT_OPTIONAL_HDR64_MAGIC) {
- num_rva_pos = opt_header_pos + 108;
-
- try self.in_file.seekTo(opt_header_pos + 24);
- self.pe_header.image_base = try in.readIntLittle(u64);
- } else return error.InvalidPEMagic;
-
- try self.in_file.seekTo(num_rva_pos);
-
- const number_of_rva_and_sizes = try in.readIntLittle(u32);
- if (number_of_rva_and_sizes != IMAGE_NUMBEROF_DIRECTORY_ENTRIES)
- return error.InvalidPEHeader;
-
- for (self.pe_header.data_directory) |*data_dir| {
- data_dir.* = OptionalHeader.DataDirectory{
- .virtual_address = try in.readIntLittle(u32),
- .size = try in.readIntLittle(u32),
- };
- }
+ // JK: we used to check the architecture here and return an error if it was not x86 or a derivative.
+ // However I am willing to take a leap of faith and let aarch64 have a shot also.
}
pub fn getPdbPath(self: *Coff, buffer: []u8) !usize {
- try self.loadSections();
+ assert(self.is_image);
const header = blk: {
- if (self.getSection(".buildid")) |section| {
- break :blk section.header;
- } else if (self.getSection(".rdata")) |section| {
- break :blk section.header;
+ if (self.getSectionByName(".buildid")) |hdr| {
+ break :blk hdr;
+ } else if (self.getSectionByName(".rdata")) |hdr| {
+ break :blk hdr;
} else {
return error.MissingCoffSection;
}
};
- const debug_dir = &self.pe_header.data_directory[DEBUG_DIRECTORY];
+ const data_dirs = self.getDataDirectories();
+ const debug_dir = data_dirs[DEBUG_DIRECTORY];
const file_offset = debug_dir.virtual_address - header.virtual_address + header.pointer_to_raw_data;
- const in = self.in_file.reader();
- try self.in_file.seekTo(file_offset);
+ var stream = std.io.fixedBufferStream(self.data);
+ const reader = stream.reader();
+ try stream.seekTo(file_offset);
// Find the correct DebugDirectoryEntry, and where its data is stored.
// It can be in any section.
const debug_dir_entry_count = debug_dir.size / @sizeOf(DebugDirectoryEntry);
var i: u32 = 0;
blk: while (i < debug_dir_entry_count) : (i += 1) {
- const debug_dir_entry = try in.readStruct(DebugDirectoryEntry);
+ const debug_dir_entry = try reader.readStruct(DebugDirectoryEntry);
if (debug_dir_entry.type == IMAGE_DEBUG_TYPE_CODEVIEW) {
- for (self.sections.items) |*section| {
- const section_start = section.header.virtual_address;
- const section_size = section.header.misc.virtual_size;
+ for (self.getSectionHeaders()) |*section| {
+ const section_start = section.virtual_address;
+ const section_size = section.virtual_size;
const rva = debug_dir_entry.address_of_raw_data;
const offset = rva - section_start;
if (section_start <= rva and offset < section_size and debug_dir_entry.size_of_data <= section_size - offset) {
- try self.in_file.seekTo(section.header.pointer_to_raw_data + offset);
+ try stream.seekTo(section.pointer_to_raw_data + offset);
break :blk;
}
}
@@ -271,19 +890,19 @@ pub const Coff = struct {
}
var cv_signature: [4]u8 = undefined; // CodeView signature
- try in.readNoEof(cv_signature[0..]);
+ try reader.readNoEof(cv_signature[0..]);
// 'RSDS' indicates PDB70 format, used by lld.
if (!mem.eql(u8, &cv_signature, "RSDS"))
return error.InvalidPEMagic;
- try in.readNoEof(self.guid[0..]);
- self.age = try in.readIntLittle(u32);
+ try reader.readNoEof(self.guid[0..]);
+ self.age = try reader.readIntLittle(u32);
// Finally read the null-terminated string.
- var byte = try in.readByte();
+ var byte = try reader.readByte();
i = 0;
while (byte != 0 and i < buffer.len) : (i += 1) {
buffer[i] = byte;
- byte = try in.readByte();
+ byte = try reader.readByte();
}
if (byte != 0 and i == buffer.len)
@@ -292,126 +911,232 @@ pub const Coff = struct {
return @as(usize, i);
}
- pub fn loadSections(self: *Coff) !void {
- if (self.sections.items.len == self.coff_header.number_of_sections)
- return;
-
- try self.sections.ensureTotalCapacityPrecise(self.allocator, self.coff_header.number_of_sections);
-
- const in = self.in_file.reader();
-
- var name: [32]u8 = undefined;
-
- var i: u16 = 0;
- while (i < self.coff_header.number_of_sections) : (i += 1) {
- try in.readNoEof(name[0..8]);
-
- if (name[0] == '/') {
- // This is a long name and stored in the string table
- const offset_len = mem.indexOfScalar(u8, name[1..], 0) orelse 7;
-
- const str_offset = try std.fmt.parseInt(u32, name[1 .. offset_len + 1], 10);
- const str = try self.readStringFromTable(str_offset, &name);
- std.mem.set(u8, name[str.len..], 0);
- } else {
- std.mem.set(u8, name[8..], 0);
- }
-
- self.sections.appendAssumeCapacity(Section{
- .header = SectionHeader{
- .name = name,
- .misc = SectionHeader.Misc{ .virtual_size = try in.readIntLittle(u32) },
- .virtual_address = try in.readIntLittle(u32),
- .size_of_raw_data = try in.readIntLittle(u32),
- .pointer_to_raw_data = try in.readIntLittle(u32),
- .pointer_to_relocations = try in.readIntLittle(u32),
- .pointer_to_line_numbers = try in.readIntLittle(u32),
- .number_of_relocations = try in.readIntLittle(u16),
- .number_of_line_numbers = try in.readIntLittle(u16),
- .characteristics = try in.readIntLittle(u32),
- },
- });
- }
+ pub fn getCoffHeader(self: Coff) CoffHeader {
+ return @ptrCast(*align(1) const CoffHeader, self.data[self.coff_header_offset..][0..@sizeOf(CoffHeader)]).*;
}
- pub fn getSection(self: *Coff, comptime name: []const u8) ?*Section {
- for (self.sections.items) |*sec| {
- if (mem.eql(u8, sec.header.name[0..name.len], name)) {
- return sec;
+ pub fn getOptionalHeader(self: Coff) OptionalHeader {
+ assert(self.is_image);
+ const offset = self.coff_header_offset + @sizeOf(CoffHeader);
+ return @ptrCast(*align(1) const OptionalHeader, self.data[offset..][0..@sizeOf(OptionalHeader)]).*;
+ }
+
+ pub fn getOptionalHeader32(self: Coff) OptionalHeaderPE32 {
+ assert(self.is_image);
+ const offset = self.coff_header_offset + @sizeOf(CoffHeader);
+ return @ptrCast(*align(1) const OptionalHeaderPE32, self.data[offset..][0..@sizeOf(OptionalHeaderPE32)]).*;
+ }
+
+ pub fn getOptionalHeader64(self: Coff) OptionalHeaderPE64 {
+ assert(self.is_image);
+ const offset = self.coff_header_offset + @sizeOf(CoffHeader);
+ return @ptrCast(*align(1) const OptionalHeaderPE64, self.data[offset..][0..@sizeOf(OptionalHeaderPE64)]).*;
+ }
+
+ pub fn getImageBase(self: Coff) u64 {
+ const hdr = self.getOptionalHeader();
+ return switch (hdr.magic) {
+ IMAGE_NT_OPTIONAL_HDR32_MAGIC => self.getOptionalHeader32().image_base,
+ IMAGE_NT_OPTIONAL_HDR64_MAGIC => self.getOptionalHeader64().image_base,
+ else => unreachable, // We assume we have validated the header already
+ };
+ }
+
+ pub fn getNumberOfDataDirectories(self: Coff) u32 {
+ const hdr = self.getOptionalHeader();
+ return switch (hdr.magic) {
+ IMAGE_NT_OPTIONAL_HDR32_MAGIC => self.getOptionalHeader32().number_of_rva_and_sizes,
+ IMAGE_NT_OPTIONAL_HDR64_MAGIC => self.getOptionalHeader64().number_of_rva_and_sizes,
+ else => unreachable, // We assume we have validated the header already
+ };
+ }
+
+ pub fn getDataDirectories(self: *const Coff) []align(1) const ImageDataDirectory {
+ const hdr = self.getOptionalHeader();
+ const size: usize = switch (hdr.magic) {
+ IMAGE_NT_OPTIONAL_HDR32_MAGIC => @sizeOf(OptionalHeaderPE32),
+ IMAGE_NT_OPTIONAL_HDR64_MAGIC => @sizeOf(OptionalHeaderPE64),
+ else => unreachable, // We assume we have validated the header already
+ };
+ const offset = self.coff_header_offset + @sizeOf(CoffHeader) + size;
+ return @ptrCast([*]align(1) const ImageDataDirectory, self.data.ptr + offset)[0..self.getNumberOfDataDirectories()];
+ }
+
+ pub fn getSymtab(self: *const Coff) ?Symtab {
+ const coff_header = self.getCoffHeader();
+ if (coff_header.pointer_to_symbol_table == 0) return null;
+
+ const offset = coff_header.pointer_to_symbol_table;
+ const size = coff_header.number_of_symbols * Symbol.sizeOf();
+ return .{ .buffer = self.data[offset..][0..size] };
+ }
+
+ pub fn getStrtab(self: *const Coff) ?Strtab {
+ const coff_header = self.getCoffHeader();
+ if (coff_header.pointer_to_symbol_table == 0) return null;
+
+ const offset = coff_header.pointer_to_symbol_table + Symbol.sizeOf() * coff_header.number_of_symbols;
+ const size = mem.readIntLittle(u32, self.data[offset..][0..4]);
+ return Strtab{ .buffer = self.data[offset..][0..size] };
+ }
+
+ pub fn getSectionHeaders(self: *const Coff) []align(1) const SectionHeader {
+ const coff_header = self.getCoffHeader();
+ const offset = self.coff_header_offset + @sizeOf(CoffHeader) + coff_header.size_of_optional_header;
+ return @ptrCast([*]align(1) const SectionHeader, self.data.ptr + offset)[0..coff_header.number_of_sections];
+ }
+
+ pub fn getSectionName(self: *const Coff, sect_hdr: *align(1) const SectionHeader) []const u8 {
+ const name = sect_hdr.getName() orelse blk: {
+ const strtab = self.getStrtab().?;
+ const name_offset = sect_hdr.getNameOffset().?;
+ break :blk strtab.get(name_offset);
+ };
+ return name;
+ }
+
+ pub fn getSectionByName(self: *const Coff, comptime name: []const u8) ?*align(1) const SectionHeader {
+ for (self.getSectionHeaders()) |*sect| {
+ if (mem.eql(u8, self.getSectionName(sect), name)) {
+ return sect;
}
}
return null;
}
// Return an owned slice full of the section data
- pub fn getSectionData(self: *Coff, comptime name: []const u8, allocator: mem.Allocator) ![]u8 {
- const sec = for (self.sections.items) |*sec| {
- if (mem.eql(u8, sec.header.name[0..name.len], name)) {
- break sec;
- }
- } else {
- return error.MissingCoffSection;
- };
- const in = self.in_file.reader();
- try self.in_file.seekTo(sec.header.pointer_to_raw_data);
- const out_buff = try allocator.alloc(u8, sec.header.misc.virtual_size);
- try in.readNoEof(out_buff);
+ pub fn getSectionDataAlloc(self: *const Coff, comptime name: []const u8, allocator: mem.Allocator) ![]u8 {
+ const sec = self.getSectionByName(name) orelse return error.MissingCoffSection;
+ const out_buff = try allocator.alloc(u8, sec.virtual_size);
+ mem.copy(u8, out_buff, self.data[sec.pointer_to_raw_data..][0..sec.virtual_size]);
return out_buff;
}
-};
-const CoffHeader = struct {
- machine: u16,
- number_of_sections: u16,
- timedate_stamp: u32,
- pointer_to_symbol_table: u32,
- number_of_symbols: u32,
- size_of_optional_header: u16,
- characteristics: u16,
-};
+ pub const Symtab = struct {
+ buffer: []const u8,
-const OptionalHeader = struct {
- const DataDirectory = struct {
- virtual_address: u32,
- size: u32,
+ fn len(self: Symtab) usize {
+ return @divExact(self.buffer.len, Symbol.sizeOf());
+ }
+
+ const Tag = enum {
+ symbol,
+ func_def,
+ debug_info,
+ weak_ext,
+ file_def,
+ sect_def,
+ };
+
+ const Record = union(Tag) {
+ symbol: Symbol,
+ debug_info: DebugInfoDefinition,
+ func_def: FunctionDefinition,
+ weak_ext: WeakExternalDefinition,
+ file_def: FileDefinition,
+ sect_def: SectionDefinition,
+ };
+
+ /// Lives as long as Symtab instance.
+ fn at(self: Symtab, index: usize, tag: Tag) Record {
+ const offset = index * Symbol.sizeOf();
+ const raw = self.buffer[offset..][0..Symbol.sizeOf()];
+ return switch (tag) {
+ .symbol => .{ .symbol = asSymbol(raw) },
+ .debug_info => .{ .debug_info = asDebugInfo(raw) },
+ .func_def => .{ .func_def = asFuncDef(raw) },
+ .weak_ext => .{ .weak_ext = asWeakExtDef(raw) },
+ .file_def => .{ .file_def = asFileDef(raw) },
+ .sect_def => .{ .sect_def = asSectDef(raw) },
+ };
+ }
+
+ fn asSymbol(raw: []const u8) Symbol {
+ return .{
+ .name = raw[0..8].*,
+ .value = mem.readIntLittle(u32, raw[8..12]),
+ .section_number = @intToEnum(SectionNumber, mem.readIntLittle(u16, raw[12..14])),
+ .@"type" = @bitCast(SymType, mem.readIntLittle(u16, raw[14..16])),
+ .storage_class = @intToEnum(StorageClass, raw[16]),
+ .number_of_aux_symbols = raw[17],
+ };
+ }
+
+ fn asDebugInfo(raw: []const u8) DebugInfoDefinition {
+ return .{
+ .unused_1 = raw[0..4].*,
+ .linenumber = mem.readIntLittle(u16, raw[4..6]),
+ .unused_2 = raw[6..12].*,
+ .pointer_to_next_function = mem.readIntLittle(u32, raw[12..16]),
+ .unused_3 = raw[16..18].*,
+ };
+ }
+
+ fn asFuncDef(raw: []const u8) FunctionDefinition {
+ return .{
+ .tag_index = mem.readIntLittle(u32, raw[0..4]),
+ .total_size = mem.readIntLittle(u32, raw[4..8]),
+ .pointer_to_linenumber = mem.readIntLittle(u32, raw[8..12]),
+ .pointer_to_next_function = mem.readIntLittle(u32, raw[12..16]),
+ .unused = raw[16..18].*,
+ };
+ }
+
+ fn asWeakExtDef(raw: []const u8) WeakExternalDefinition {
+ return .{
+ .tag_index = mem.readIntLittle(u32, raw[0..4]),
+ .flag = @intToEnum(WeakExternalFlag, mem.readIntLittle(u32, raw[4..8])),
+ .unused = raw[8..18].*,
+ };
+ }
+
+ fn asFileDef(raw: []const u8) FileDefinition {
+ return .{
+ .file_name = raw[0..18].*,
+ };
+ }
+
+ fn asSectDef(raw: []const u8) SectionDefinition {
+ return .{
+ .length = mem.readIntLittle(u32, raw[0..4]),
+ .number_of_relocations = mem.readIntLittle(u16, raw[4..6]),
+ .number_of_linenumbers = mem.readIntLittle(u16, raw[6..8]),
+ .checksum = mem.readIntLittle(u32, raw[8..12]),
+ .number = mem.readIntLittle(u16, raw[12..14]),
+ .selection = @intToEnum(ComdatSelection, raw[14]),
+ .unused = raw[15..18].*,
+ };
+ }
+
+ const Slice = struct {
+ buffer: []const u8,
+ num: usize,
+ count: usize = 0,
+
+ /// Lives as long as Symtab instance.
+ fn next(self: *Slice) ?Symbol {
+ if (self.count >= self.num) return null;
+ const sym = asSymbol(self.buffer[0..Symbol.sizeOf()]);
+ self.count += 1;
+ self.buffer = self.buffer[Symbol.sizeOf()..];
+ return sym;
+ }
+ };
+
+ fn slice(self: Symtab, start: usize, end: ?usize) Slice {
+ const offset = start * Symbol.sizeOf();
+ const llen = if (end) |e| e * Symbol.sizeOf() else self.buffer.len;
+ const num = @divExact(llen - offset, Symbol.sizeOf());
+ return Slice{ .buffer = self.buffer[offset..llen], .num = num };
+ }
};
- magic: u16,
- data_directory: [IMAGE_NUMBEROF_DIRECTORY_ENTRIES]DataDirectory,
- entry_addr: u32,
- code_base: u32,
- image_base: u64,
-};
+ pub const Strtab = struct {
+ buffer: []const u8,
-const DebugDirectoryEntry = packed struct {
- characteristiccs: u32,
- time_date_stamp: u32,
- major_version: u16,
- minor_version: u16,
- @"type": u32,
- size_of_data: u32,
- address_of_raw_data: u32,
- pointer_to_raw_data: u32,
-};
-
-pub const Section = struct {
- header: SectionHeader,
-};
-
-const SectionHeader = struct {
- const Misc = union {
- physical_address: u32,
- virtual_size: u32,
+ fn get(self: Strtab, off: u32) []const u8 {
+ assert(off < self.buffer.len);
+ return mem.sliceTo(@ptrCast([*:0]const u8, self.buffer.ptr + off), 0);
+ }
};
-
- name: [32]u8,
- misc: Misc,
- virtual_address: u32,
- size_of_raw_data: u32,
- pointer_to_raw_data: u32,
- pointer_to_relocations: u32,
- pointer_to_line_numbers: u32,
- number_of_relocations: u16,
- number_of_line_numbers: u16,
- characteristics: u32,
};
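
End to end, the rewritten `std.coff.Coff` parses a caller-provided byte slice instead of seeking around a `File`. A minimal sketch of the new flow (`dumpTextSection` is a hypothetical helper; `parse` takes ownership of the buffer and `deinit` frees it):

    const std = @import("std");

    fn dumpTextSection(gpa: std.mem.Allocator, path: []const u8) !void {
        const file = try std.fs.cwd().openFile(path, .{});
        defer file.close();

        const data = try file.readToEndAlloc(gpa, std.math.maxInt(u32));
        var coff_obj = std.coff.Coff{ .allocator = gpa };
        try coff_obj.parse(data); // takes ownership of `data`
        defer coff_obj.deinit();

        if (coff_obj.getSectionByName(".text")) |hdr| {
            std.debug.print("{s}: {d} bytes\n", .{ coff_obj.getSectionName(hdr), hdr.virtual_size });
        }
    }
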
diff --git a/lib/std/compress/deflate/bits_utils.zig b/lib/std/compress/deflate/bits_utils.zig
index cb1237c900..1620a8e380 100644
--- a/lib/std/compress/deflate/bits_utils.zig
+++ b/lib/std/compress/deflate/bits_utils.zig
@@ -2,7 +2,7 @@ const math = @import("std").math;
// Reverse bit-by-bit a N-bit code.
pub fn bitReverse(comptime T: type, value: T, N: usize) T {
- const r = @bitReverse(T, value);
+ const r = @bitReverse(value);
return r >> @intCast(math.Log2Int(T), @typeInfo(T).Int.bits - N);
}
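
This one-token change recurs through the crypto hunks below: stage2's `@bitReverse` (like `@ctz`, `@clz`, and `@popCount`) now infers the operand type instead of taking it as a first argument. A quick test-shaped sketch:

    const std = @import("std");

    test "@bitReverse infers the operand type" {
        const x: u8 = 0b1101_0010;
        // Previously: @bitReverse(u8, x).
        try std.testing.expectEqual(@as(u8, 0b0100_1011), @bitReverse(x));
    }
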
diff --git a/lib/std/crypto/25519/ed25519.zig b/lib/std/crypto/25519/ed25519.zig
index 2a7671863e..7066b1a154 100644
--- a/lib/std/crypto/25519/ed25519.zig
+++ b/lib/std/crypto/25519/ed25519.zig
@@ -355,7 +355,9 @@ test "ed25519 batch verification" {
try Ed25519.verifyBatch(2, signature_batch);
signature_batch[1].sig = sig1;
- try std.testing.expectError(error.SignatureVerificationFailed, Ed25519.verifyBatch(signature_batch.len, signature_batch));
+ // TODO https://github.com/ziglang/zig/issues/12240
+ const sig_len = signature_batch.len;
+ try std.testing.expectError(error.SignatureVerificationFailed, Ed25519.verifyBatch(sig_len, signature_batch));
}
}
diff --git a/lib/std/crypto/aes_ocb.zig b/lib/std/crypto/aes_ocb.zig
index cc50bbbe45..68f5bc4a9a 100644
--- a/lib/std/crypto/aes_ocb.zig
+++ b/lib/std/crypto/aes_ocb.zig
@@ -66,7 +66,7 @@ fn AesOcb(comptime Aes: anytype) type {
var offset = [_]u8{0} ** 16;
var i: usize = 0;
while (i < full_blocks) : (i += 1) {
- xorWith(&offset, lt[@ctz(usize, i + 1)]);
+ xorWith(&offset, lt[@ctz(i + 1)]);
var e = xorBlocks(offset, a[i * 16 ..][0..16].*);
aes_enc_ctx.encrypt(&e, &e);
xorWith(&sum, e);
@@ -129,7 +129,7 @@ fn AesOcb(comptime Aes: anytype) type {
var es: [16 * wb]u8 align(16) = undefined;
var j: usize = 0;
while (j < wb) : (j += 1) {
- xorWith(&offset, lt[@ctz(usize, i + 1 + j)]);
+ xorWith(&offset, lt[@ctz(i + 1 + j)]);
offsets[j] = offset;
const p = m[(i + j) * 16 ..][0..16].*;
mem.copy(u8, es[j * 16 ..][0..16], &xorBlocks(p, offsets[j]));
@@ -143,7 +143,7 @@ fn AesOcb(comptime Aes: anytype) type {
}
}
while (i < full_blocks) : (i += 1) {
- xorWith(&offset, lt[@ctz(usize, i + 1)]);
+ xorWith(&offset, lt[@ctz(i + 1)]);
const p = m[i * 16 ..][0..16].*;
var e = xorBlocks(p, offset);
aes_enc_ctx.encrypt(&e, &e);
@@ -193,7 +193,7 @@ fn AesOcb(comptime Aes: anytype) type {
var es: [16 * wb]u8 align(16) = undefined;
var j: usize = 0;
while (j < wb) : (j += 1) {
- xorWith(&offset, lt[@ctz(usize, i + 1 + j)]);
+ xorWith(&offset, lt[@ctz(i + 1 + j)]);
offsets[j] = offset;
const q = c[(i + j) * 16 ..][0..16].*;
mem.copy(u8, es[j * 16 ..][0..16], &xorBlocks(q, offsets[j]));
@@ -207,7 +207,7 @@ fn AesOcb(comptime Aes: anytype) type {
}
}
while (i < full_blocks) : (i += 1) {
- xorWith(&offset, lt[@ctz(usize, i + 1)]);
+ xorWith(&offset, lt[@ctz(i + 1)]);
const q = c[i * 16 ..][0..16].*;
var e = xorBlocks(q, offset);
aes_dec_ctx.decrypt(&e, &e);
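
For context on the `@ctz` call sites above: OCB's offset schedule XORs in L-table entry ntz(i) for the 1-based block index i, where ntz is the number of trailing zeros, which is exactly `@ctz(i + 1)` for the 0-based loop counter. A test-shaped sketch of the indexing:

    const std = @import("std");

    test "trailing zeros pick the OCB L-table entry" {
        // Block indices 1, 2, 3, 4 select L-table entries 0, 1, 0, 2.
        const expected = [_]usize{ 0, 1, 0, 2 };
        for (expected) |e, i| {
            try std.testing.expectEqual(e, @as(usize, @ctz(i + 1)));
        }
    }
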
diff --git a/lib/std/crypto/ghash.zig b/lib/std/crypto/ghash.zig
index f3fac0038e..8f57f9033a 100644
--- a/lib/std/crypto/ghash.zig
+++ b/lib/std/crypto/ghash.zig
@@ -41,8 +41,8 @@ pub const Ghash = struct {
pub fn init(key: *const [key_length]u8) Ghash {
const h1 = mem.readIntBig(u64, key[0..8]);
const h0 = mem.readIntBig(u64, key[8..16]);
- const h1r = @bitReverse(u64, h1);
- const h0r = @bitReverse(u64, h0);
+ const h1r = @bitReverse(h1);
+ const h0r = @bitReverse(h0);
const h2 = h0 ^ h1;
const h2r = h0r ^ h1r;
@@ -68,8 +68,8 @@ pub const Ghash = struct {
hh.update(key);
const hh1 = hh.y1;
const hh0 = hh.y0;
- const hh1r = @bitReverse(u64, hh1);
- const hh0r = @bitReverse(u64, hh0);
+ const hh1r = @bitReverse(hh1);
+ const hh0r = @bitReverse(hh0);
const hh2 = hh0 ^ hh1;
const hh2r = hh0r ^ hh1r;
@@ -156,8 +156,8 @@ pub const Ghash = struct {
y1 ^= mem.readIntBig(u64, msg[i..][0..8]);
y0 ^= mem.readIntBig(u64, msg[i..][8..16]);
- const y1r = @bitReverse(u64, y1);
- const y0r = @bitReverse(u64, y0);
+ const y1r = @bitReverse(y1);
+ const y0r = @bitReverse(y0);
const y2 = y0 ^ y1;
const y2r = y0r ^ y1r;
@@ -172,8 +172,8 @@ pub const Ghash = struct {
const sy1 = mem.readIntBig(u64, msg[i..][16..24]);
const sy0 = mem.readIntBig(u64, msg[i..][24..32]);
- const sy1r = @bitReverse(u64, sy1);
- const sy0r = @bitReverse(u64, sy0);
+ const sy1r = @bitReverse(sy1);
+ const sy0r = @bitReverse(sy0);
const sy2 = sy0 ^ sy1;
const sy2r = sy0r ^ sy1r;
@@ -191,9 +191,9 @@ pub const Ghash = struct {
z0h ^= sz0h;
z1h ^= sz1h;
z2h ^= sz2h;
- z0h = @bitReverse(u64, z0h) >> 1;
- z1h = @bitReverse(u64, z1h) >> 1;
- z2h = @bitReverse(u64, z2h) >> 1;
+ z0h = @bitReverse(z0h) >> 1;
+ z1h = @bitReverse(z1h) >> 1;
+ z2h = @bitReverse(z2h) >> 1;
var v3 = z1h;
var v2 = z1 ^ z2h;
@@ -217,8 +217,8 @@ pub const Ghash = struct {
y1 ^= mem.readIntBig(u64, msg[i..][0..8]);
y0 ^= mem.readIntBig(u64, msg[i..][8..16]);
- const y1r = @bitReverse(u64, y1);
- const y0r = @bitReverse(u64, y0);
+ const y1r = @bitReverse(y1);
+ const y0r = @bitReverse(y0);
const y2 = y0 ^ y1;
const y2r = y0r ^ y1r;
@@ -228,9 +228,9 @@ pub const Ghash = struct {
var z0h = clmul(y0r, st.h0r);
var z1h = clmul(y1r, st.h1r);
var z2h = clmul(y2r, st.h2r) ^ z0h ^ z1h;
- z0h = @bitReverse(u64, z0h) >> 1;
- z1h = @bitReverse(u64, z1h) >> 1;
- z2h = @bitReverse(u64, z2h) >> 1;
+ z0h = @bitReverse(z0h) >> 1;
+ z1h = @bitReverse(z1h) >> 1;
+ z2h = @bitReverse(z2h) >> 1;
// shift & reduce
var v3 = z1h;
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index 7d0dcd35d0..a7f0b202cb 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -816,11 +816,11 @@ pub fn openSelfDebugInfo(allocator: mem.Allocator) anyerror!DebugInfo {
/// TODO it's weird to take ownership even on error, rework this code.
fn readCoffDebugInfo(allocator: mem.Allocator, coff_file: File) !ModuleDebugInfo {
nosuspend {
- errdefer coff_file.close();
+ defer coff_file.close();
const coff_obj = try allocator.create(coff.Coff);
errdefer allocator.destroy(coff_obj);
- coff_obj.* = coff.Coff.init(allocator, coff_file);
+ coff_obj.* = .{ .allocator = allocator };
var di = ModuleDebugInfo{
.base_address = undefined,
@@ -828,27 +828,42 @@ fn readCoffDebugInfo(allocator: mem.Allocator, coff_file: File) !ModuleDebugInfo
.debug_data = undefined,
};
- try di.coff.loadHeader();
- try di.coff.loadSections();
- if (di.coff.getSection(".debug_info")) |sec| {
+ // TODO convert to Windows' memory-mapped file API
+ const file_len = math.cast(usize, try coff_file.getEndPos()) orelse math.maxInt(usize);
+ const data = try coff_file.readToEndAlloc(allocator, file_len);
+ try di.coff.parse(data);
+
+ if (di.coff.getSectionByName(".debug_info")) |sec| {
// This coff file has embedded DWARF debug info
_ = sec;
// TODO: free the section data slices
- const debug_info_data = di.coff.getSectionData(".debug_info", allocator) catch null;
- const debug_abbrev_data = di.coff.getSectionData(".debug_abbrev", allocator) catch null;
- const debug_str_data = di.coff.getSectionData(".debug_str", allocator) catch null;
- const debug_line_data = di.coff.getSectionData(".debug_line", allocator) catch null;
- const debug_line_str_data = di.coff.getSectionData(".debug_line_str", allocator) catch null;
- const debug_ranges_data = di.coff.getSectionData(".debug_ranges", allocator) catch null;
+ const debug_info = di.coff.getSectionDataAlloc(".debug_info", allocator) catch null;
+ const debug_abbrev = di.coff.getSectionDataAlloc(".debug_abbrev", allocator) catch null;
+ const debug_str = di.coff.getSectionDataAlloc(".debug_str", allocator) catch null;
+ const debug_str_offsets = di.coff.getSectionDataAlloc(".debug_str_offsets", allocator) catch null;
+ const debug_line = di.coff.getSectionDataAlloc(".debug_line", allocator) catch null;
+ const debug_line_str = di.coff.getSectionDataAlloc(".debug_line_str", allocator) catch null;
+ const debug_ranges = di.coff.getSectionDataAlloc(".debug_ranges", allocator) catch null;
+ const debug_loclists = di.coff.getSectionDataAlloc(".debug_loclists", allocator) catch null;
+ const debug_rnglists = di.coff.getSectionDataAlloc(".debug_rnglists", allocator) catch null;
+ const debug_addr = di.coff.getSectionDataAlloc(".debug_addr", allocator) catch null;
+ const debug_names = di.coff.getSectionDataAlloc(".debug_names", allocator) catch null;
+ const debug_frame = di.coff.getSectionDataAlloc(".debug_frame", allocator) catch null;
var dwarf = DW.DwarfInfo{
.endian = native_endian,
- .debug_info = debug_info_data orelse return error.MissingDebugInfo,
- .debug_abbrev = debug_abbrev_data orelse return error.MissingDebugInfo,
- .debug_str = debug_str_data orelse return error.MissingDebugInfo,
- .debug_line = debug_line_data orelse return error.MissingDebugInfo,
- .debug_line_str = debug_line_str_data,
- .debug_ranges = debug_ranges_data,
+ .debug_info = debug_info orelse return error.MissingDebugInfo,
+ .debug_abbrev = debug_abbrev orelse return error.MissingDebugInfo,
+ .debug_str = debug_str orelse return error.MissingDebugInfo,
+ .debug_str_offsets = debug_str_offsets,
+ .debug_line = debug_line orelse return error.MissingDebugInfo,
+ .debug_line_str = debug_line_str,
+ .debug_ranges = debug_ranges,
+ .debug_loclists = debug_loclists,
+ .debug_rnglists = debug_rnglists,
+ .debug_addr = debug_addr,
+ .debug_names = debug_names,
+ .debug_frame = debug_frame,
};
try DW.openDwarfDebugInfo(&dwarf, allocator);
di.debug_data = PdbOrDwarf{ .dwarf = dwarf };
@@ -863,7 +878,10 @@ fn readCoffDebugInfo(allocator: mem.Allocator, coff_file: File) !ModuleDebugInfo
defer allocator.free(path);
di.debug_data = PdbOrDwarf{ .pdb = undefined };
- di.debug_data.pdb = try pdb.Pdb.init(allocator, path);
+ di.debug_data.pdb = pdb.Pdb.init(allocator, path) catch |err| switch (err) {
+ error.FileNotFound, error.IsDir => return error.MissingDebugInfo,
+ else => return err,
+ };
try di.debug_data.pdb.parseInfoStream();
try di.debug_data.pdb.parseDbiStream();
@@ -912,9 +930,15 @@ pub fn readElfDebugInfo(allocator: mem.Allocator, elf_file: File) !ModuleDebugIn
var opt_debug_info: ?[]const u8 = null;
var opt_debug_abbrev: ?[]const u8 = null;
var opt_debug_str: ?[]const u8 = null;
+ var opt_debug_str_offsets: ?[]const u8 = null;
var opt_debug_line: ?[]const u8 = null;
var opt_debug_line_str: ?[]const u8 = null;
var opt_debug_ranges: ?[]const u8 = null;
+ var opt_debug_loclists: ?[]const u8 = null;
+ var opt_debug_rnglists: ?[]const u8 = null;
+ var opt_debug_addr: ?[]const u8 = null;
+ var opt_debug_names: ?[]const u8 = null;
+ var opt_debug_frame: ?[]const u8 = null;
for (shdrs) |*shdr| {
if (shdr.sh_type == elf.SHT_NULL) continue;
@@ -926,12 +950,24 @@ pub fn readElfDebugInfo(allocator: mem.Allocator, elf_file: File) !ModuleDebugIn
opt_debug_abbrev = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size);
} else if (mem.eql(u8, name, ".debug_str")) {
opt_debug_str = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size);
+ } else if (mem.eql(u8, name, ".debug_str_offsets")) {
+ opt_debug_str_offsets = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size);
} else if (mem.eql(u8, name, ".debug_line")) {
opt_debug_line = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size);
} else if (mem.eql(u8, name, ".debug_line_str")) {
opt_debug_line_str = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size);
} else if (mem.eql(u8, name, ".debug_ranges")) {
opt_debug_ranges = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size);
+ } else if (mem.eql(u8, name, ".debug_loclists")) {
+ opt_debug_loclists = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size);
+ } else if (mem.eql(u8, name, ".debug_rnglists")) {
+ opt_debug_rnglists = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size);
+ } else if (mem.eql(u8, name, ".debug_addr")) {
+ opt_debug_addr = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size);
+ } else if (mem.eql(u8, name, ".debug_names")) {
+ opt_debug_names = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size);
+ } else if (mem.eql(u8, name, ".debug_frame")) {
+ opt_debug_frame = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size);
}
}
@@ -940,9 +976,15 @@ pub fn readElfDebugInfo(allocator: mem.Allocator, elf_file: File) !ModuleDebugIn
.debug_info = opt_debug_info orelse return error.MissingDebugInfo,
.debug_abbrev = opt_debug_abbrev orelse return error.MissingDebugInfo,
.debug_str = opt_debug_str orelse return error.MissingDebugInfo,
+ .debug_str_offsets = opt_debug_str_offsets,
.debug_line = opt_debug_line orelse return error.MissingDebugInfo,
.debug_line_str = opt_debug_line_str,
.debug_ranges = opt_debug_ranges,
+ .debug_loclists = opt_debug_loclists,
+ .debug_rnglists = opt_debug_rnglists,
+ .debug_addr = opt_debug_addr,
+ .debug_names = opt_debug_names,
+ .debug_frame = opt_debug_frame,
};
try DW.openDwarfDebugInfo(&di, allocator);
@@ -968,24 +1010,20 @@ fn readMachODebugInfo(allocator: mem.Allocator, macho_file: File) !ModuleDebugIn
if (hdr.magic != macho.MH_MAGIC_64)
return error.InvalidDebugInfo;
- const hdr_base = @ptrCast([*]const u8, hdr);
- var ptr = hdr_base + @sizeOf(macho.mach_header_64);
- var ncmd: u32 = hdr.ncmds;
- const symtab = while (ncmd != 0) : (ncmd -= 1) {
- const lc = @ptrCast(*const std.macho.load_command, ptr);
- switch (lc.cmd) {
- .SYMTAB => break @ptrCast(*const std.macho.symtab_command, ptr),
- else => {},
- }
- ptr = @alignCast(@alignOf(std.macho.load_command), ptr + lc.cmdsize);
- } else {
- return error.MissingDebugInfo;
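+ // Walk the Mach-O load commands and stop at the first LC_SYMTAB.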
+ var it = macho.LoadCommandIterator{
+ .ncmds = hdr.ncmds,
+ .buffer = mapped_mem[@sizeOf(macho.mach_header_64)..][0..hdr.sizeofcmds],
};
+ const symtab = while (it.next()) |cmd| switch (cmd.cmd()) {
+ .SYMTAB => break cmd.cast(macho.symtab_command).?,
+ else => {},
+ } else return error.MissingDebugInfo;
+
const syms = @ptrCast(
[*]const macho.nlist_64,
- @alignCast(@alignOf(macho.nlist_64), hdr_base + symtab.symoff),
+ @alignCast(@alignOf(macho.nlist_64), &mapped_mem[symtab.symoff]),
)[0..symtab.nsyms];
- const strings = @ptrCast([*]const u8, hdr_base + symtab.stroff)[0 .. symtab.strsize - 1 :0];
+ const strings = mapped_mem[symtab.stroff..][0 .. symtab.strsize - 1 :0];
const symbols_buf = try allocator.alloc(MachoSymbol, syms.len);
@@ -1200,48 +1238,46 @@ pub const DebugInfo = struct {
if (address < base_address) continue;
const header = std.c._dyld_get_image_header(i) orelse continue;
- // The array of load commands is right after the header
- var cmd_ptr = @intToPtr([*]u8, @ptrToInt(header) + @sizeOf(macho.mach_header_64));
- var cmds = header.ncmds;
- while (cmds != 0) : (cmds -= 1) {
- const lc = @ptrCast(
- *macho.load_command,
- @alignCast(@alignOf(macho.load_command), cmd_ptr),
- );
- cmd_ptr += lc.cmdsize;
- if (lc.cmd != .SEGMENT_64) continue;
+ var it = macho.LoadCommandIterator{
+ .ncmds = header.ncmds,
+ .buffer = @alignCast(@alignOf(u64), @intToPtr(
+ [*]u8,
+ @ptrToInt(header) + @sizeOf(macho.mach_header_64),
+ ))[0..header.sizeofcmds],
+ };
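+ // Find the SEGMENT_64 command whose VM range contains the rebased
+ // address, then load (and cache) the debug info for that image.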
+ while (it.next()) |cmd| switch (cmd.cmd()) {
+ .SEGMENT_64 => {
+ const segment_cmd = cmd.cast(macho.segment_command_64).?;
+ const rebased_address = address - base_address;
+ const seg_start = segment_cmd.vmaddr;
+ const seg_end = seg_start + segment_cmd.vmsize;
- const segment_cmd = @ptrCast(
- *const std.macho.segment_command_64,
- @alignCast(@alignOf(std.macho.segment_command_64), lc),
- );
+ if (rebased_address >= seg_start and rebased_address < seg_end) {
+ if (self.address_map.get(base_address)) |obj_di| {
+ return obj_di;
+ }
- const rebased_address = address - base_address;
- const seg_start = segment_cmd.vmaddr;
- const seg_end = seg_start + segment_cmd.vmsize;
+ const obj_di = try self.allocator.create(ModuleDebugInfo);
+ errdefer self.allocator.destroy(obj_di);
+
+ const macho_path = mem.sliceTo(std.c._dyld_get_image_name(i), 0);
+ const macho_file = fs.cwd().openFile(macho_path, .{
+ .intended_io_mode = .blocking,
+ }) catch |err| switch (err) {
+ error.FileNotFound => return error.MissingDebugInfo,
+ else => return err,
+ };
+ obj_di.* = try readMachODebugInfo(self.allocator, macho_file);
+ obj_di.base_address = base_address;
+
+ try self.address_map.putNoClobber(base_address, obj_di);
- if (rebased_address >= seg_start and rebased_address < seg_end) {
- if (self.address_map.get(base_address)) |obj_di| {
return obj_di;
}
-
- const obj_di = try self.allocator.create(ModuleDebugInfo);
- errdefer self.allocator.destroy(obj_di);
-
- const macho_path = mem.sliceTo(std.c._dyld_get_image_name(i), 0);
- const macho_file = fs.cwd().openFile(macho_path, .{ .intended_io_mode = .blocking }) catch |err| switch (err) {
- error.FileNotFound => return error.MissingDebugInfo,
- else => return err,
- };
- obj_di.* = try readMachODebugInfo(self.allocator, macho_file);
- obj_di.base_address = base_address;
-
- try self.address_map.putNoClobber(base_address, obj_di);
-
- return obj_di;
- }
- }
+ },
+ else => {},
+ };
}
return error.MissingDebugInfo;
@@ -1445,44 +1481,31 @@ pub const ModuleDebugInfo = switch (native_os) {
if (hdr.magic != std.macho.MH_MAGIC_64)
return error.InvalidDebugInfo;
- const hdr_base = @ptrCast([*]const u8, hdr);
- var ptr = hdr_base + @sizeOf(macho.mach_header_64);
- var segptr = ptr;
- var ncmd: u32 = hdr.ncmds;
- var segcmd: ?*const macho.segment_command_64 = null;
- var symtabcmd: ?*const macho.symtab_command = null;
-
- while (ncmd != 0) : (ncmd -= 1) {
- const lc = @ptrCast(*const std.macho.load_command, ptr);
- switch (lc.cmd) {
- .SEGMENT_64 => {
- segcmd = @ptrCast(
- *const std.macho.segment_command_64,
- @alignCast(@alignOf(std.macho.segment_command_64), ptr),
- );
- segptr = ptr;
- },
- .SYMTAB => {
- symtabcmd = @ptrCast(
- *const std.macho.symtab_command,
- @alignCast(@alignOf(std.macho.symtab_command), ptr),
- );
- },
- else => {},
- }
- ptr = @alignCast(@alignOf(std.macho.load_command), ptr + lc.cmdsize);
- }
+ var segcmd: ?macho.LoadCommandIterator.LoadCommand = null;
+ var symtabcmd: ?macho.symtab_command = null;
+ var it = macho.LoadCommandIterator{
+ .ncmds = hdr.ncmds,
+ .buffer = mapped_mem[@sizeOf(macho.mach_header_64)..][0..hdr.sizeofcmds],
+ };
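+ // Relocatable Mach-O objects typically carry a single unnamed
+ // SEGMENT_64 holding all sections, plus one SYMTAB command.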
+ while (it.next()) |cmd| switch (cmd.cmd()) {
+ .SEGMENT_64 => segcmd = cmd,
+ .SYMTAB => symtabcmd = cmd.cast(macho.symtab_command).?,
+ else => {},
+ };
if (segcmd == null or symtabcmd == null) return error.MissingDebugInfo;
// Parse symbols
const strtab = @ptrCast(
[*]const u8,
- hdr_base + symtabcmd.?.stroff,
+ &mapped_mem[symtabcmd.?.stroff],
)[0 .. symtabcmd.?.strsize - 1 :0];
const symtab = @ptrCast(
[*]const macho.nlist_64,
- @alignCast(@alignOf(macho.nlist_64), hdr_base + symtabcmd.?.symoff),
+ @alignCast(
+ @alignOf(macho.nlist_64),
+ &mapped_mem[symtabcmd.?.symoff],
+ ),
)[0..symtabcmd.?.nsyms];
// TODO handle tentative (common) symbols
@@ -1496,25 +1519,21 @@ pub const ModuleDebugInfo = switch (native_os) {
addr_table.putAssumeCapacityNoClobber(sym_name, sym.n_value);
}
- var opt_debug_line: ?*const macho.section_64 = null;
- var opt_debug_info: ?*const macho.section_64 = null;
- var opt_debug_abbrev: ?*const macho.section_64 = null;
- var opt_debug_str: ?*const macho.section_64 = null;
- var opt_debug_line_str: ?*const macho.section_64 = null;
- var opt_debug_ranges: ?*const macho.section_64 = null;
-
- const sections = @ptrCast(
- [*]const macho.section_64,
- @alignCast(@alignOf(macho.section_64), segptr + @sizeOf(std.macho.segment_command_64)),
- )[0..segcmd.?.nsects];
- for (sections) |*sect| {
- // The section name may not exceed 16 chars and a trailing null may
- // not be present
- const name = if (mem.indexOfScalar(u8, sect.sectname[0..], 0)) |last|
- sect.sectname[0..last]
- else
- sect.sectname[0..];
+ var opt_debug_line: ?macho.section_64 = null;
+ var opt_debug_info: ?macho.section_64 = null;
+ var opt_debug_abbrev: ?macho.section_64 = null;
+ var opt_debug_str: ?macho.section_64 = null;
+ var opt_debug_str_offsets: ?macho.section_64 = null;
+ var opt_debug_line_str: ?macho.section_64 = null;
+ var opt_debug_ranges: ?macho.section_64 = null;
+ var opt_debug_loclists: ?macho.section_64 = null;
+ var opt_debug_rnglists: ?macho.section_64 = null;
+ var opt_debug_addr: ?macho.section_64 = null;
+ var opt_debug_names: ?macho.section_64 = null;
+ var opt_debug_frame: ?macho.section_64 = null;
+ for (segcmd.?.getSections()) |sect| {
+ const name = sect.sectName();
if (mem.eql(u8, name, "__debug_line")) {
opt_debug_line = sect;
} else if (mem.eql(u8, name, "__debug_info")) {
@@ -1523,10 +1542,22 @@ pub const ModuleDebugInfo = switch (native_os) {
opt_debug_abbrev = sect;
} else if (mem.eql(u8, name, "__debug_str")) {
opt_debug_str = sect;
+ } else if (mem.eql(u8, name, "__debug_str_offsets")) {
+ opt_debug_str_offsets = sect;
} else if (mem.eql(u8, name, "__debug_line_str")) {
opt_debug_line_str = sect;
} else if (mem.eql(u8, name, "__debug_ranges")) {
opt_debug_ranges = sect;
+ } else if (mem.eql(u8, name, "__debug_loclists")) {
+ opt_debug_loclists = sect;
+ } else if (mem.eql(u8, name, "__debug_rnglists")) {
+ opt_debug_rnglists = sect;
+ } else if (mem.eql(u8, name, "__debug_addr")) {
+ opt_debug_addr = sect;
+ } else if (mem.eql(u8, name, "__debug_names")) {
+ opt_debug_names = sect;
+ } else if (mem.eql(u8, name, "__debug_frame")) {
+ opt_debug_frame = sect;
}
}
@@ -1544,6 +1575,10 @@ pub const ModuleDebugInfo = switch (native_os) {
.debug_info = try chopSlice(mapped_mem, debug_info.offset, debug_info.size),
.debug_abbrev = try chopSlice(mapped_mem, debug_abbrev.offset, debug_abbrev.size),
.debug_str = try chopSlice(mapped_mem, debug_str.offset, debug_str.size),
+ .debug_str_offsets = if (opt_debug_str_offsets) |debug_str_offsets|
+ try chopSlice(mapped_mem, debug_str_offsets.offset, debug_str_offsets.size)
+ else
+ null,
.debug_line = try chopSlice(mapped_mem, debug_line.offset, debug_line.size),
.debug_line_str = if (opt_debug_line_str) |debug_line_str|
try chopSlice(mapped_mem, debug_line_str.offset, debug_line_str.size)
@@ -1553,6 +1588,26 @@ pub const ModuleDebugInfo = switch (native_os) {
try chopSlice(mapped_mem, debug_ranges.offset, debug_ranges.size)
else
null,
+ .debug_loclists = if (opt_debug_loclists) |debug_loclists|
+ try chopSlice(mapped_mem, debug_loclists.offset, debug_loclists.size)
+ else
+ null,
+ .debug_rnglists = if (opt_debug_rnglists) |debug_rnglists|
+ try chopSlice(mapped_mem, debug_rnglists.offset, debug_rnglists.size)
+ else
+ null,
+ .debug_addr = if (opt_debug_addr) |debug_addr|
+ try chopSlice(mapped_mem, debug_addr.offset, debug_addr.size)
+ else
+ null,
+ .debug_names = if (opt_debug_names) |debug_names|
+ try chopSlice(mapped_mem, debug_names.offset, debug_names.size)
+ else
+ null,
+ .debug_frame = if (opt_debug_frame) |debug_frame|
+ try chopSlice(mapped_mem, debug_frame.offset, debug_frame.size)
+ else
+ null,
};
try DW.openDwarfDebugInfo(&di, allocator);
@@ -1607,6 +1662,8 @@ pub const ModuleDebugInfo = switch (native_os) {
.compile_unit_name = compile_unit.die.getAttrString(
o_file_di,
DW.AT.name,
+ o_file_di.debug_str,
+ compile_unit.*,
) catch |err| switch (err) {
error.MissingDebugInfo, error.InvalidDebugInfo => "???",
},
@@ -1647,7 +1704,7 @@ pub const ModuleDebugInfo = switch (native_os) {
switch (self.debug_data) {
.dwarf => |*dwarf| {
- const dwarf_address = relocated_address + self.coff.pe_header.image_base;
+ const dwarf_address = relocated_address + self.coff.getImageBase();
return getSymbolFromDwarf(allocator, dwarf_address, dwarf);
},
.pdb => {
@@ -1655,13 +1712,14 @@ pub const ModuleDebugInfo = switch (native_os) {
},
}
- var coff_section: *coff.Section = undefined;
+ var coff_section: *align(1) const coff.SectionHeader = undefined;
const mod_index = for (self.debug_data.pdb.sect_contribs) |sect_contrib| {
- if (sect_contrib.Section > self.coff.sections.items.len) continue;
+ const sections = self.coff.getSectionHeaders();
+ if (sect_contrib.Section > sections.len) continue;
// Remember that SectionContribEntry.Section is 1-based.
- coff_section = &self.coff.sections.items[sect_contrib.Section - 1];
+ coff_section = &sections[sect_contrib.Section - 1];
- const vaddr_start = coff_section.header.virtual_address + sect_contrib.Offset;
+ const vaddr_start = coff_section.virtual_address + sect_contrib.Offset;
const vaddr_end = vaddr_start + sect_contrib.Size;
if (relocated_address >= vaddr_start and relocated_address < vaddr_end) {
break sect_contrib.ModuleIndex;
@@ -1677,11 +1735,11 @@ pub const ModuleDebugInfo = switch (native_os) {
const symbol_name = self.debug_data.pdb.getSymbolName(
module,
- relocated_address - coff_section.header.virtual_address,
+ relocated_address - coff_section.virtual_address,
) orelse "???";
const opt_line_info = try self.debug_data.pdb.getLineNumberInfo(
module,
- relocated_address - coff_section.header.virtual_address,
+ relocated_address - coff_section.virtual_address,
);
return SymbolInfo{
@@ -1727,7 +1785,7 @@ fn getSymbolFromDwarf(allocator: mem.Allocator, address: u64, di: *DW.DwarfInfo)
if (nosuspend di.findCompileUnit(address)) |compile_unit| {
return SymbolInfo{
.symbol_name = nosuspend di.getSymbolName(address) orelse "???",
- .compile_unit_name = compile_unit.die.getAttrString(di, DW.AT.name) catch |err| switch (err) {
+ .compile_unit_name = compile_unit.die.getAttrString(di, DW.AT.name, di.debug_str, compile_unit.*) catch |err| switch (err) {
error.MissingDebugInfo, error.InvalidDebugInfo => "???",
},
.line_info = nosuspend di.getLineNumberInfo(allocator, compile_unit.*, address) catch |err| switch (err) {
@@ -1816,7 +1874,7 @@ fn resetSegfaultHandler() void {
return;
}
var act = os.Sigaction{
- .handler = .{ .sigaction = os.SIG.DFL },
+ .handler = .{ .handler = os.SIG.DFL },
.mask = os.empty_sigset,
.flags = 0,
};
@@ -1976,7 +2034,7 @@ noinline fn showMyTrace() usize {
/// For more advanced usage, see `ConfigurableTrace`.
pub const Trace = ConfigurableTrace(2, 4, builtin.mode == .Debug);
-pub fn ConfigurableTrace(comptime size: usize, comptime stack_frame_count: usize, comptime enabled: bool) type {
+pub fn ConfigurableTrace(comptime size: usize, comptime stack_frame_count: usize, comptime is_enabled: bool) type {
return struct {
addrs: [actual_size][stack_frame_count]usize = undefined,
notes: [actual_size][]const u8 = undefined,
@@ -1985,7 +2043,7 @@ pub fn ConfigurableTrace(comptime size: usize, comptime stack_frame_count: usize
const actual_size = if (enabled) size else 0;
const Index = if (enabled) usize else u0;
- pub const enabled = enabled;
+ pub const enabled = is_enabled;
pub const add = if (enabled) addNoInline else addNoOp;
diff --git a/lib/std/dwarf.zig b/lib/std/dwarf.zig
index d61d198c7e..c307cdb733 100644
--- a/lib/std/dwarf.zig
+++ b/lib/std/dwarf.zig
@@ -168,6 +168,11 @@ const CompileUnit = struct {
is_64: bool,
die: *Die,
pc_range: ?PcRange,
+
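+ // DWARF v5 base offsets (DW_AT_str_offsets_base and friends); zero when
+ // the corresponding attribute is absent from the compile unit DIE.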
+ str_offsets_base: usize,
+ addr_base: usize,
+ rnglists_base: usize,
+ loclists_base: usize,
};
const AbbrevTable = std.ArrayList(AbbrevTableEntry);
@@ -205,6 +210,7 @@ const AbbrevAttr = struct {
const FormValue = union(enum) {
Address: u64,
+ AddrOffset: usize,
Block: []u8,
Const: Constant,
ExprLoc: []u8,
@@ -214,15 +220,46 @@ const FormValue = union(enum) {
RefAddr: u64,
String: []const u8,
StrPtr: u64,
+ StrOffset: usize,
LineStrPtr: u64,
+ LocListOffset: u64,
+ RangeListOffset: u64,
+ data16: [16]u8,
+
+ fn getString(fv: FormValue, di: DwarfInfo) ![]const u8 {
+ switch (fv) {
+ .String => |s| return s,
+ .StrPtr => |off| return di.getString(off),
+ .LineStrPtr => |off| return di.getLineString(off),
+ else => return badDwarf(),
+ }
+ }
+
+ fn getUInt(fv: FormValue, comptime U: type) !U {
+ switch (fv) {
+ .Const => |c| {
+ const int = try c.asUnsignedLe();
+ return math.cast(U, int) orelse return badDwarf();
+ },
+ .SecOffset => |x| return math.cast(U, x) orelse return badDwarf(),
+ else => return badDwarf(),
+ }
+ }
+
+ fn getData16(fv: FormValue) ![16]u8 {
+ switch (fv) {
+ .data16 => |d| return d,
+ else => return badDwarf(),
+ }
+ }
};
const Constant = struct {
payload: u64,
signed: bool,
- fn asUnsignedLe(self: *const Constant) !u64 {
- if (self.signed) return error.InvalidDebugInfo;
+ fn asUnsignedLe(self: Constant) !u64 {
+ if (self.signed) return badDwarf();
return self.payload;
}
};
@@ -251,21 +288,46 @@ const Die = struct {
return null;
}
- fn getAttrAddr(self: *const Die, id: u64) !u64 {
+ fn getAttrAddr(
+ self: *const Die,
+ di: *DwarfInfo,
+ id: u64,
+ compile_unit: CompileUnit,
+ ) error{ InvalidDebugInfo, MissingDebugInfo }!u64 {
const form_value = self.getAttr(id) orelse return error.MissingDebugInfo;
return switch (form_value.*) {
FormValue.Address => |value| value,
+ FormValue.AddrOffset => |index| {
+ const debug_addr = di.debug_addr orelse return badDwarf();
+ // addr_base points to the first item after the header; however, we
+ // still need to read the header to know the size of each item.
+ // Empirically, its format may disagree with is_64 on the compile unit.
+ // The header is 8 or 16 bytes depending on that format.
+ if (compile_unit.addr_base < 8) return badDwarf();
+
+ const version = mem.readInt(u16, debug_addr[compile_unit.addr_base - 4 ..][0..2], di.endian);
+ if (version != 5) return badDwarf();
+
+ const addr_size = debug_addr[compile_unit.addr_base - 2];
+ const seg_size = debug_addr[compile_unit.addr_base - 1];
+
+ const byte_offset = compile_unit.addr_base + (addr_size + seg_size) * index;
+ if (byte_offset + addr_size > debug_addr.len) return badDwarf();
+ switch (addr_size) {
+ 1 => return debug_addr[byte_offset],
+ 2 => return mem.readInt(u16, debug_addr[byte_offset..][0..2], di.endian),
+ 4 => return mem.readInt(u32, debug_addr[byte_offset..][0..4], di.endian),
+ 8 => return mem.readInt(u64, debug_addr[byte_offset..][0..8], di.endian),
+ else => return badDwarf(),
+ }
+ },
else => error.InvalidDebugInfo,
};
}
fn getAttrSecOffset(self: *const Die, id: u64) !u64 {
const form_value = self.getAttr(id) orelse return error.MissingDebugInfo;
- return switch (form_value.*) {
- FormValue.Const => |value| value.asUnsignedLe(),
- FormValue.SecOffset => |value| value,
- else => error.InvalidDebugInfo,
- };
+ return form_value.getUInt(u64);
}
fn getAttrUnsignedLe(self: *const Die, id: u64) !u64 {
@@ -284,22 +346,44 @@ const Die = struct {
};
}
- pub fn getAttrString(self: *const Die, di: *DwarfInfo, id: u64) ![]const u8 {
+ pub fn getAttrString(
+ self: *const Die,
+ di: *DwarfInfo,
+ id: u64,
+ opt_str: ?[]const u8,
+ compile_unit: CompileUnit,
+ ) error{ InvalidDebugInfo, MissingDebugInfo }![]const u8 {
const form_value = self.getAttr(id) orelse return error.MissingDebugInfo;
- return switch (form_value.*) {
- FormValue.String => |value| value,
- FormValue.StrPtr => |offset| di.getString(offset),
- FormValue.LineStrPtr => |offset| di.getLineString(offset),
- else => error.InvalidDebugInfo,
- };
+ switch (form_value.*) {
+ FormValue.String => |value| return value,
+ FormValue.StrPtr => |offset| return di.getString(offset),
+ FormValue.StrOffset => |index| {
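+ // DW_FORM_strx*: index into .debug_str_offsets, whose entries are
+ // 4- or 8-byte offsets into the string section, per the unit format.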
+ const debug_str_offsets = di.debug_str_offsets orelse return badDwarf();
+ if (compile_unit.str_offsets_base == 0) return badDwarf();
+ if (compile_unit.is_64) {
+ const byte_offset = compile_unit.str_offsets_base + 8 * index;
+ if (byte_offset + 8 > debug_str_offsets.len) return badDwarf();
+ const offset = mem.readInt(u64, debug_str_offsets[byte_offset..][0..8], di.endian);
+ return getStringGeneric(opt_str, offset);
+ } else {
+ const byte_offset = compile_unit.str_offsets_base + 4 * index;
+ if (byte_offset + 4 > debug_str_offsets.len) return badDwarf();
+ const offset = mem.readInt(u32, debug_str_offsets[byte_offset..][0..4], di.endian);
+ return getStringGeneric(opt_str, offset);
+ }
+ },
+ FormValue.LineStrPtr => |offset| return di.getLineString(offset),
+ else => return badDwarf(),
+ }
}
};
const FileEntry = struct {
- file_name: []const u8,
- dir_index: usize,
- mtime: usize,
- len_bytes: usize,
+ path: []const u8,
+ dir_index: u32 = 0,
+ mtime: u64 = 0,
+ size: u64 = 0,
+ md5: [16]u8 = [1]u8{0} ** 16,
};
const LineNumberProgram = struct {
@@ -307,13 +391,14 @@ const LineNumberProgram = struct {
file: usize,
line: i64,
column: u64,
+ version: u16,
is_stmt: bool,
basic_block: bool,
end_sequence: bool,
default_is_stmt: bool,
target_address: u64,
- include_dirs: []const []const u8,
+ include_dirs: []const FileEntry,
prev_valid: bool,
prev_address: u64,
@@ -344,12 +429,18 @@ const LineNumberProgram = struct {
self.prev_end_sequence = undefined;
}
- pub fn init(is_stmt: bool, include_dirs: []const []const u8, target_address: u64) LineNumberProgram {
+ pub fn init(
+ is_stmt: bool,
+ include_dirs: []const FileEntry,
+ target_address: u64,
+ version: u16,
+ ) LineNumberProgram {
return LineNumberProgram{
.address = 0,
.file = 1,
.line = 1,
.column = 0,
+ .version = version,
.is_stmt = is_stmt,
.basic_block = false,
.end_sequence = false,
@@ -372,18 +463,24 @@ const LineNumberProgram = struct {
allocator: mem.Allocator,
file_entries: []const FileEntry,
) !?debug.LineInfo {
- if (self.prev_valid and self.target_address >= self.prev_address and self.target_address < self.address) {
- const file_entry = if (self.prev_file == 0) {
- return error.MissingDebugInfo;
- } else if (self.prev_file - 1 >= file_entries.len) {
- return error.InvalidDebugInfo;
- } else &file_entries[self.prev_file - 1];
+ if (self.prev_valid and
+ self.target_address >= self.prev_address and
+ self.target_address < self.address)
+ {
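+ // DWARF v5 file indices are 0-based; earlier versions are 1-based,
+ // with 0 meaning "no file".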
+ const file_index = if (self.version >= 5) self.prev_file else i: {
+ if (self.prev_file == 0) return missingDwarf();
+ break :i self.prev_file - 1;
+ };
- const dir_name = if (file_entry.dir_index >= self.include_dirs.len) {
- return error.InvalidDebugInfo;
- } else self.include_dirs[file_entry.dir_index];
+ if (file_index >= file_entries.len) return badDwarf();
+ const file_entry = &file_entries[file_index];
- const file_name = try fs.path.join(allocator, &[_][]const u8{ dir_name, file_entry.file_name });
+ if (file_entry.dir_index >= self.include_dirs.len) return badDwarf();
+ const dir_name = self.include_dirs[file_entry.dir_index].path;
+
+ const file_name = try fs.path.join(allocator, &[_][]const u8{
+ dir_name, file_entry.path,
+ });
return debug.LineInfo{
.line = if (self.prev_line >= 0) @intCast(u64, self.prev_line) else 0,
@@ -410,7 +507,7 @@ fn readUnitLength(in_stream: anytype, endian: std.builtin.Endian, is_64: *bool)
if (is_64.*) {
return in_stream.readInt(u64, endian);
} else {
- if (first_32_bits >= 0xfffffff0) return error.InvalidDebugInfo;
+ if (first_32_bits >= 0xfffffff0) return badDwarf();
// TODO this cast should not be needed
return @as(u64, first_32_bits);
}
@@ -487,6 +584,12 @@ fn parseFormValueRef(in_stream: anytype, endian: std.builtin.Endian, size: i32)
fn parseFormValue(allocator: mem.Allocator, in_stream: anytype, form_id: u64, endian: std.builtin.Endian, is_64: bool) anyerror!FormValue {
return switch (form_id) {
FORM.addr => FormValue{ .Address = try readAddress(in_stream, endian, @sizeOf(usize) == 8) },
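+ // DWARF v5: DW_FORM_addrx* encode an index into .debug_addr, resolved
+ // later against the compile unit's DW_AT_addr_base.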
+ FORM.addrx1 => return FormValue{ .AddrOffset = try in_stream.readInt(u8, endian) },
+ FORM.addrx2 => return FormValue{ .AddrOffset = try in_stream.readInt(u16, endian) },
+ FORM.addrx3 => return FormValue{ .AddrOffset = try in_stream.readInt(u24, endian) },
+ FORM.addrx4 => return FormValue{ .AddrOffset = try in_stream.readInt(u32, endian) },
+ FORM.addrx => return FormValue{ .AddrOffset = try nosuspend leb.readULEB128(usize, in_stream) },
+
FORM.block1 => parseFormValueBlock(allocator, in_stream, endian, 1),
FORM.block2 => parseFormValueBlock(allocator, in_stream, endian, 2),
FORM.block4 => parseFormValueBlock(allocator, in_stream, endian, 4),
@@ -498,6 +601,11 @@ fn parseFormValue(allocator: mem.Allocator, in_stream: anytype, form_id: u64, en
FORM.data2 => parseFormValueConstant(in_stream, false, endian, 2),
FORM.data4 => parseFormValueConstant(in_stream, false, endian, 4),
FORM.data8 => parseFormValueConstant(in_stream, false, endian, 8),
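+ // DW_FORM_data16 (new in DWARF v5) carries, e.g., the MD5 checksums of
+ // line-table file entries.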
+ FORM.data16 => {
+ var buf: [16]u8 = undefined;
+ if ((try nosuspend in_stream.readAll(&buf)) < 16) return error.EndOfFile;
+ return FormValue{ .data16 = buf };
+ },
FORM.udata, FORM.sdata => {
const signed = form_id == FORM.sdata;
return parseFormValueConstant(in_stream, signed, endian, -1);
@@ -522,6 +630,11 @@ fn parseFormValue(allocator: mem.Allocator, in_stream: anytype, form_id: u64, en
FORM.string => FormValue{ .String = try in_stream.readUntilDelimiterAlloc(allocator, 0, math.maxInt(usize)) },
FORM.strp => FormValue{ .StrPtr = try readAddress(in_stream, endian, is_64) },
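+ // DWARF v5: DW_FORM_strx* encode an index into .debug_str_offsets.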
+ FORM.strx1 => return FormValue{ .StrOffset = try in_stream.readInt(u8, endian) },
+ FORM.strx2 => return FormValue{ .StrOffset = try in_stream.readInt(u16, endian) },
+ FORM.strx3 => return FormValue{ .StrOffset = try in_stream.readInt(u24, endian) },
+ FORM.strx4 => return FormValue{ .StrOffset = try in_stream.readInt(u32, endian) },
+ FORM.strx => return FormValue{ .StrOffset = try nosuspend leb.readULEB128(usize, in_stream) },
FORM.line_strp => FormValue{ .LineStrPtr = try readAddress(in_stream, endian, is_64) },
FORM.indirect => {
const child_form_id = try nosuspend leb.readULEB128(u64, in_stream);
@@ -534,9 +647,11 @@ fn parseFormValue(allocator: mem.Allocator, in_stream: anytype, form_id: u64, en
return await @asyncCall(frame, {}, parseFormValue, .{ allocator, in_stream, child_form_id, endian, is_64 });
},
FORM.implicit_const => FormValue{ .Const = Constant{ .signed = true, .payload = undefined } },
-
+ FORM.loclistx => return FormValue{ .LocListOffset = try nosuspend leb.readULEB128(u64, in_stream) },
+ FORM.rnglistx => return FormValue{ .RangeListOffset = try nosuspend leb.readULEB128(u64, in_stream) },
else => {
- return error.InvalidDebugInfo;
+ //std.debug.print("unrecognized form id: {x}\n", .{form_id});
+ return badDwarf();
},
};
}
@@ -554,9 +669,15 @@ pub const DwarfInfo = struct {
debug_info: []const u8,
debug_abbrev: []const u8,
debug_str: []const u8,
+ debug_str_offsets: ?[]const u8,
debug_line: []const u8,
debug_line_str: ?[]const u8,
debug_ranges: ?[]const u8,
+ debug_loclists: ?[]const u8,
+ debug_rnglists: ?[]const u8,
+ debug_addr: ?[]const u8,
+ debug_names: ?[]const u8,
+ debug_frame: ?[]const u8,
// Filled later by the initializer
abbrev_table_list: std.ArrayListUnmanaged(AbbrevTableHeader) = .{},
compile_unit_list: std.ArrayListUnmanaged(CompileUnit) = .{},
@@ -592,7 +713,7 @@ pub const DwarfInfo = struct {
fn scanAllFunctions(di: *DwarfInfo, allocator: mem.Allocator) !void {
var stream = io.fixedBufferStream(di.debug_info);
- const in = &stream.reader();
+ const in = stream.reader();
const seekable = &stream.seekableStream();
var this_unit_offset: u64 = 0;
@@ -609,29 +730,26 @@ pub const DwarfInfo = struct {
const next_offset = unit_length + (if (is_64) @as(usize, 12) else @as(usize, 4));
const version = try in.readInt(u16, di.endian);
- if (version < 2 or version > 5) return error.InvalidDebugInfo;
+ if (version < 2 or version > 5) return badDwarf();
var address_size: u8 = undefined;
var debug_abbrev_offset: u64 = undefined;
- switch (version) {
- 5 => {
- const unit_type = try in.readInt(u8, di.endian);
- if (unit_type != UT.compile) return error.InvalidDebugInfo;
- address_size = try in.readByte();
- debug_abbrev_offset = if (is_64)
- try in.readInt(u64, di.endian)
- else
- try in.readInt(u32, di.endian);
- },
- else => {
- debug_abbrev_offset = if (is_64)
- try in.readInt(u64, di.endian)
- else
- try in.readInt(u32, di.endian);
- address_size = try in.readByte();
- },
+ if (version >= 5) {
+ const unit_type = try in.readInt(u8, di.endian);
+ if (unit_type != UT.compile) return badDwarf();
+ address_size = try in.readByte();
+ debug_abbrev_offset = if (is_64)
+ try in.readInt(u64, di.endian)
+ else
+ try in.readInt(u32, di.endian);
+ } else {
+ debug_abbrev_offset = if (is_64)
+ try in.readInt(u64, di.endian)
+ else
+ try in.readInt(u32, di.endian);
+ address_size = try in.readByte();
}
- if (address_size != @sizeOf(usize)) return error.InvalidDebugInfo;
+ if (address_size != @sizeOf(usize)) return badDwarf();
const compile_unit_pos = try seekable.getPos();
const abbrev_table = try di.getAbbrevTable(allocator, debug_abbrev_offset);
@@ -640,11 +758,26 @@ pub const DwarfInfo = struct {
const next_unit_pos = this_unit_offset + next_offset;
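+ // The compile unit DIE carries the base offsets needed to resolve
+ // DW_FORM_strx/DW_FORM_addrx attributes on the DIEs that follow it.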
+ var compile_unit: CompileUnit = undefined;
+
while ((try seekable.getPos()) < next_unit_pos) {
- const die_obj = (try di.parseDie(arena, in, abbrev_table, is_64)) orelse continue;
+ var die_obj = (try di.parseDie(arena, in, abbrev_table, is_64)) orelse continue;
const after_die_offset = try seekable.getPos();
switch (die_obj.tag_id) {
+ TAG.compile_unit => {
+ compile_unit = .{
+ .version = version,
+ .is_64 = is_64,
+ .die = &die_obj,
+ .pc_range = null,
+
+ .str_offsets_base = if (die_obj.getAttr(AT.str_offsets_base)) |fv| try fv.getUInt(usize) else 0,
+ .addr_base = if (die_obj.getAttr(AT.addr_base)) |fv| try fv.getUInt(usize) else 0,
+ .rnglists_base = if (die_obj.getAttr(AT.rnglists_base)) |fv| try fv.getUInt(usize) else 0,
+ .loclists_base = if (die_obj.getAttr(AT.loclists_base)) |fv| try fv.getUInt(usize) else 0,
+ };
+ },
TAG.subprogram, TAG.inlined_subroutine, TAG.subroutine, TAG.entry_point => {
const fn_name = x: {
var depth: i32 = 3;
@@ -652,30 +785,30 @@ pub const DwarfInfo = struct {
// Prevent endless loops
while (depth > 0) : (depth -= 1) {
if (this_die_obj.getAttr(AT.name)) |_| {
- const name = try this_die_obj.getAttrString(di, AT.name);
+ const name = try this_die_obj.getAttrString(di, AT.name, di.debug_str, compile_unit);
break :x try allocator.dupe(u8, name);
} else if (this_die_obj.getAttr(AT.abstract_origin)) |_| {
// Follow the DIE it points to and repeat
const ref_offset = try this_die_obj.getAttrRef(AT.abstract_origin);
- if (ref_offset > next_offset) return error.InvalidDebugInfo;
+ if (ref_offset > next_offset) return badDwarf();
try seekable.seekTo(this_unit_offset + ref_offset);
this_die_obj = (try di.parseDie(
arena,
in,
abbrev_table,
is_64,
- )) orelse return error.InvalidDebugInfo;
+ )) orelse return badDwarf();
} else if (this_die_obj.getAttr(AT.specification)) |_| {
// Follow the DIE it points to and repeat
const ref_offset = try this_die_obj.getAttrRef(AT.specification);
- if (ref_offset > next_offset) return error.InvalidDebugInfo;
+ if (ref_offset > next_offset) return badDwarf();
try seekable.seekTo(this_unit_offset + ref_offset);
this_die_obj = (try di.parseDie(
arena,
in,
abbrev_table,
is_64,
- )) orelse return error.InvalidDebugInfo;
+ )) orelse return badDwarf();
} else {
break :x null;
}
@@ -685,7 +818,7 @@ pub const DwarfInfo = struct {
};
const pc_range = x: {
- if (die_obj.getAttrAddr(AT.low_pc)) |low_pc| {
+ if (die_obj.getAttrAddr(di, AT.low_pc, compile_unit)) |low_pc| {
if (die_obj.getAttr(AT.high_pc)) |high_pc_value| {
const pc_end = switch (high_pc_value.*) {
FormValue.Address => |value| value,
@@ -693,7 +826,7 @@ pub const DwarfInfo = struct {
const offset = try value.asUnsignedLe();
break :b (low_pc + offset);
},
- else => return error.InvalidDebugInfo,
+ else => return badDwarf(),
};
break :x PcRange{
.start = low_pc,
@@ -738,29 +871,26 @@ pub const DwarfInfo = struct {
const next_offset = unit_length + (if (is_64) @as(usize, 12) else @as(usize, 4));
const version = try in.readInt(u16, di.endian);
- if (version < 2 or version > 5) return error.InvalidDebugInfo;
+ if (version < 2 or version > 5) return badDwarf();
var address_size: u8 = undefined;
var debug_abbrev_offset: u64 = undefined;
- switch (version) {
- 5 => {
- const unit_type = try in.readInt(u8, di.endian);
- if (unit_type != UT.compile) return error.InvalidDebugInfo;
- address_size = try in.readByte();
- debug_abbrev_offset = if (is_64)
- try in.readInt(u64, di.endian)
- else
- try in.readInt(u32, di.endian);
- },
- else => {
- debug_abbrev_offset = if (is_64)
- try in.readInt(u64, di.endian)
- else
- try in.readInt(u32, di.endian);
- address_size = try in.readByte();
- },
+ if (version >= 5) {
+ const unit_type = try in.readInt(u8, di.endian);
+ if (unit_type != UT.compile) return badDwarf();
+ address_size = try in.readByte();
+ debug_abbrev_offset = if (is_64)
+ try in.readInt(u64, di.endian)
+ else
+ try in.readInt(u32, di.endian);
+ } else {
+ debug_abbrev_offset = if (is_64)
+ try in.readInt(u64, di.endian)
+ else
+ try in.readInt(u32, di.endian);
+ address_size = try in.readByte();
}
- if (address_size != @sizeOf(usize)) return error.InvalidDebugInfo;
+ if (address_size != @sizeOf(usize)) return badDwarf();
const compile_unit_pos = try seekable.getPos();
const abbrev_table = try di.getAbbrevTable(allocator, debug_abbrev_offset);
@@ -770,12 +900,23 @@ pub const DwarfInfo = struct {
const compile_unit_die = try allocator.create(Die);
errdefer allocator.destroy(compile_unit_die);
compile_unit_die.* = (try di.parseDie(allocator, in, abbrev_table, is_64)) orelse
- return error.InvalidDebugInfo;
+ return badDwarf();
- if (compile_unit_die.tag_id != TAG.compile_unit) return error.InvalidDebugInfo;
+ if (compile_unit_die.tag_id != TAG.compile_unit) return badDwarf();
- const pc_range = x: {
- if (compile_unit_die.getAttrAddr(AT.low_pc)) |low_pc| {
+ var compile_unit: CompileUnit = .{
+ .version = version,
+ .is_64 = is_64,
+ .pc_range = null,
+ .die = compile_unit_die,
+ .str_offsets_base = if (compile_unit_die.getAttr(AT.str_offsets_base)) |fv| try fv.getUInt(usize) else 0,
+ .addr_base = if (compile_unit_die.getAttr(AT.addr_base)) |fv| try fv.getUInt(usize) else 0,
+ .rnglists_base = if (compile_unit_die.getAttr(AT.rnglists_base)) |fv| try fv.getUInt(usize) else 0,
+ .loclists_base = if (compile_unit_die.getAttr(AT.loclists_base)) |fv| try fv.getUInt(usize) else 0,
+ };
+
+ compile_unit.pc_range = x: {
+ if (compile_unit_die.getAttrAddr(di, AT.low_pc, compile_unit)) |low_pc| {
if (compile_unit_die.getAttr(AT.high_pc)) |high_pc_value| {
const pc_end = switch (high_pc_value.*) {
FormValue.Address => |value| value,
@@ -783,7 +924,7 @@ pub const DwarfInfo = struct {
const offset = try value.asUnsignedLe();
break :b (low_pc + offset);
},
- else => return error.InvalidDebugInfo,
+ else => return badDwarf(),
};
break :x PcRange{
.start = low_pc,
@@ -798,12 +939,7 @@ pub const DwarfInfo = struct {
}
};
- try di.compile_unit_list.append(allocator, CompileUnit{
- .version = version,
- .is_64 = is_64,
- .pc_range = pc_range,
- .die = compile_unit_die,
- });
+ try di.compile_unit_list.append(allocator, compile_unit);
this_unit_offset += next_offset;
}
@@ -824,7 +960,7 @@ pub const DwarfInfo = struct {
// specified by DW_AT.low_pc or to some other value encoded
// in the list itself.
// If no starting value is specified use zero.
- var base_address = compile_unit.die.getAttrAddr(AT.low_pc) catch |err| switch (err) {
+ var base_address = compile_unit.die.getAttrAddr(di, AT.low_pc, compile_unit.*) catch |err| switch (err) {
error.MissingDebugInfo => @as(u64, 0), // TODO https://github.com/ziglang/zig/issues/11135
else => return err,
};
@@ -852,7 +988,7 @@ pub const DwarfInfo = struct {
}
}
}
- return error.MissingDebugInfo;
+ return missingDwarf();
}
/// Gets an already existing AbbrevTable given the abbrev_offset, or if not found,
@@ -919,7 +1055,7 @@ pub const DwarfInfo = struct {
) !?Die {
const abbrev_code = try leb.readULEB128(u64, in_stream);
if (abbrev_code == 0) return null;
- const table_entry = getAbbrevTableEntry(abbrev_table, abbrev_code) orelse return error.InvalidDebugInfo;
+ const table_entry = getAbbrevTableEntry(abbrev_table, abbrev_code) orelse return badDwarf();
var result = Die{
// Lives as long as the Die.
@@ -956,7 +1092,7 @@ pub const DwarfInfo = struct {
const in = &stream.reader();
const seekable = &stream.seekableStream();
- const compile_unit_cwd = try compile_unit.die.getAttrString(di, AT.comp_dir);
+ const compile_unit_cwd = try compile_unit.die.getAttrString(di, AT.comp_dir, di.debug_line_str, compile_unit);
const line_info_offset = try compile_unit.die.getAttrSecOffset(AT.stmt_list);
try seekable.seekTo(line_info_offset);
@@ -964,18 +1100,25 @@ pub const DwarfInfo = struct {
var is_64: bool = undefined;
const unit_length = try readUnitLength(in, di.endian, &is_64);
if (unit_length == 0) {
- return error.MissingDebugInfo;
+ return missingDwarf();
}
const next_offset = unit_length + (if (is_64) @as(usize, 12) else @as(usize, 4));
const version = try in.readInt(u16, di.endian);
- if (version < 2 or version > 4) return error.InvalidDebugInfo;
+ if (version < 2) return badDwarf();
+
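+ // DWARF v5 line headers carry explicit address_size and
+ // segment_selector_size fields; older versions omit them.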
+ var addr_size: u8 = if (is_64) 8 else 4;
+ var seg_size: u8 = 0;
+ if (version >= 5) {
+ addr_size = try in.readByte();
+ seg_size = try in.readByte();
+ }
const prologue_length = if (is_64) try in.readInt(u64, di.endian) else try in.readInt(u32, di.endian);
const prog_start_offset = (try seekable.getPos()) + prologue_length;
const minimum_instruction_length = try in.readByte();
- if (minimum_instruction_length == 0) return error.InvalidDebugInfo;
+ if (minimum_instruction_length == 0) return badDwarf();
if (version >= 4) {
// maximum_operations_per_instruction
@@ -986,7 +1129,7 @@ pub const DwarfInfo = struct {
const line_base = try in.readByteSigned();
const line_range = try in.readByte();
- if (line_range == 0) return error.InvalidDebugInfo;
+ if (line_range == 0) return badDwarf();
const opcode_base = try in.readByte();
@@ -1004,36 +1147,120 @@ pub const DwarfInfo = struct {
defer tmp_arena.deinit();
const arena = tmp_arena.allocator();
- var include_directories = std.ArrayList([]const u8).init(arena);
- try include_directories.append(compile_unit_cwd);
+ var include_directories = std.ArrayList(FileEntry).init(arena);
+ var file_entries = std.ArrayList(FileEntry).init(arena);
- while (true) {
- const dir = try in.readUntilDelimiterAlloc(arena, 0, math.maxInt(usize));
- if (dir.len == 0) break;
- try include_directories.append(dir);
+ if (version < 5) {
+ try include_directories.append(.{ .path = compile_unit_cwd });
+
+ while (true) {
+ const dir = try in.readUntilDelimiterAlloc(arena, 0, math.maxInt(usize));
+ if (dir.len == 0) break;
+ try include_directories.append(.{ .path = dir });
+ }
+
+ while (true) {
+ const file_name = try in.readUntilDelimiterAlloc(arena, 0, math.maxInt(usize));
+ if (file_name.len == 0) break;
+ const dir_index = try leb.readULEB128(u32, in);
+ const mtime = try leb.readULEB128(u64, in);
+ const size = try leb.readULEB128(u64, in);
+ try file_entries.append(FileEntry{
+ .path = file_name,
+ .dir_index = dir_index,
+ .mtime = mtime,
+ .size = size,
+ });
+ }
+ } else {
+ const FileEntFmt = struct {
+ content_type_code: u8,
+ form_code: u16,
+ };
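+ // v5 line headers describe directory and file entries with a table of
+ // (content type, form) pairs followed by that many entries, each field
+ // encoded with the corresponding form.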
+ {
+ var dir_ent_fmt_buf: [10]FileEntFmt = undefined;
+ const directory_entry_format_count = try in.readByte();
+ if (directory_entry_format_count > dir_ent_fmt_buf.len) return badDwarf();
+ for (dir_ent_fmt_buf[0..directory_entry_format_count]) |*ent_fmt| {
+ ent_fmt.* = .{
+ .content_type_code = try leb.readULEB128(u8, in),
+ .form_code = try leb.readULEB128(u16, in),
+ };
+ }
+
+ const directories_count = try leb.readULEB128(usize, in);
+ try include_directories.ensureUnusedCapacity(directories_count);
+ {
+ var i: usize = 0;
+ while (i < directories_count) : (i += 1) {
+ var e: FileEntry = .{ .path = &.{} };
+ for (dir_ent_fmt_buf[0..directory_entry_format_count]) |ent_fmt| {
+ const form_value = try parseFormValue(
+ arena,
+ in,
+ ent_fmt.form_code,
+ di.endian,
+ is_64,
+ );
+ switch (ent_fmt.content_type_code) {
+ LNCT.path => e.path = try form_value.getString(di.*),
+ LNCT.directory_index => e.dir_index = try form_value.getUInt(u32),
+ LNCT.timestamp => e.mtime = try form_value.getUInt(u64),
+ LNCT.size => e.size = try form_value.getUInt(u64),
+ LNCT.MD5 => e.md5 = try form_value.getData16(),
+ else => continue,
+ }
+ }
+ include_directories.appendAssumeCapacity(e);
+ }
+ }
+ }
+
+ var file_ent_fmt_buf: [10]FileEntFmt = undefined;
+ const file_name_entry_format_count = try in.readByte();
+ if (file_name_entry_format_count > file_ent_fmt_buf.len) return badDwarf();
+ for (file_ent_fmt_buf[0..file_name_entry_format_count]) |*ent_fmt| {
+ ent_fmt.* = .{
+ .content_type_code = try leb.readULEB128(u8, in),
+ .form_code = try leb.readULEB128(u16, in),
+ };
+ }
+
+ const file_names_count = try leb.readULEB128(usize, in);
+ try file_entries.ensureUnusedCapacity(file_names_count);
+ {
+ var i: usize = 0;
+ while (i < file_names_count) : (i += 1) {
+ var e: FileEntry = .{ .path = &.{} };
+ for (file_ent_fmt_buf[0..file_name_entry_format_count]) |ent_fmt| {
+ const form_value = try parseFormValue(
+ arena,
+ in,
+ ent_fmt.form_code,
+ di.endian,
+ is_64,
+ );
+ switch (ent_fmt.content_type_code) {
+ LNCT.path => e.path = try form_value.getString(di.*),
+ LNCT.directory_index => e.dir_index = try form_value.getUInt(u32),
+ LNCT.timestamp => e.mtime = try form_value.getUInt(u64),
+ LNCT.size => e.size = try form_value.getUInt(u64),
+ LNCT.MD5 => e.md5 = try form_value.getData16(),
+ else => continue,
+ }
+ }
+ file_entries.appendAssumeCapacity(e);
+ }
+ }
}
- var file_entries = std.ArrayList(FileEntry).init(arena);
var prog = LineNumberProgram.init(
default_is_stmt,
include_directories.items,
target_address,
+ version,
);
- while (true) {
- const file_name = try in.readUntilDelimiterAlloc(arena, 0, math.maxInt(usize));
- if (file_name.len == 0) break;
- const dir_index = try leb.readULEB128(usize, in);
- const mtime = try leb.readULEB128(usize, in);
- const len_bytes = try leb.readULEB128(usize, in);
- try file_entries.append(FileEntry{
- .file_name = file_name,
- .dir_index = dir_index,
- .mtime = mtime,
- .len_bytes = len_bytes,
- });
- }
-
try seekable.seekTo(prog_start_offset);
const next_unit_pos = line_info_offset + next_offset;
@@ -1043,7 +1270,7 @@ pub const DwarfInfo = struct {
if (opcode == LNS.extended_op) {
const op_size = try leb.readULEB128(u64, in);
- if (op_size < 1) return error.InvalidDebugInfo;
+ if (op_size < 1) return badDwarf();
var sub_op = try in.readByte();
switch (sub_op) {
LNE.end_sequence => {
@@ -1056,19 +1283,19 @@ pub const DwarfInfo = struct {
prog.address = addr;
},
LNE.define_file => {
- const file_name = try in.readUntilDelimiterAlloc(arena, 0, math.maxInt(usize));
- const dir_index = try leb.readULEB128(usize, in);
- const mtime = try leb.readULEB128(usize, in);
- const len_bytes = try leb.readULEB128(usize, in);
+ const path = try in.readUntilDelimiterAlloc(arena, 0, math.maxInt(usize));
+ const dir_index = try leb.readULEB128(u32, in);
+ const mtime = try leb.readULEB128(u64, in);
+ const size = try leb.readULEB128(u64, in);
try file_entries.append(FileEntry{
- .file_name = file_name,
+ .path = path,
.dir_index = dir_index,
.mtime = mtime,
- .len_bytes = len_bytes,
+ .size = size,
});
},
else => {
- const fwd_amt = math.cast(isize, op_size - 1) orelse return error.InvalidDebugInfo;
+ const fwd_amt = math.cast(isize, op_size - 1) orelse return badDwarf();
try seekable.seekBy(fwd_amt);
},
}
@@ -1119,7 +1346,7 @@ pub const DwarfInfo = struct {
},
LNS.set_prologue_end => {},
else => {
- if (opcode - 1 >= standard_opcode_lengths.len) return error.InvalidDebugInfo;
+ if (opcode - 1 >= standard_opcode_lengths.len) return badDwarf();
const len_bytes = standard_opcode_lengths[opcode - 1];
try seekable.seekBy(len_bytes);
},
@@ -1127,36 +1354,15 @@ pub const DwarfInfo = struct {
}
}
- return error.MissingDebugInfo;
+ return missingDwarf();
}
- fn getString(di: *DwarfInfo, offset: u64) ![]const u8 {
- if (offset > di.debug_str.len)
- return error.InvalidDebugInfo;
- const casted_offset = math.cast(usize, offset) orelse
- return error.InvalidDebugInfo;
-
- // Valid strings always have a terminating zero byte
- if (mem.indexOfScalarPos(u8, di.debug_str, casted_offset, 0)) |last| {
- return di.debug_str[casted_offset..last];
- }
-
- return error.InvalidDebugInfo;
+ fn getString(di: DwarfInfo, offset: u64) ![]const u8 {
+ return getStringGeneric(di.debug_str, offset);
}
- fn getLineString(di: *DwarfInfo, offset: u64) ![]const u8 {
- const debug_line_str = di.debug_line_str orelse return error.InvalidDebugInfo;
- if (offset > debug_line_str.len)
- return error.InvalidDebugInfo;
- const casted_offset = math.cast(usize, offset) orelse
- return error.InvalidDebugInfo;
-
- // Valid strings always have a terminating zero byte
- if (mem.indexOfScalarPos(u8, debug_line_str, casted_offset, 0)) |last| {
- return debug_line_str[casted_offset..last];
- }
-
- return error.InvalidDebugInfo;
+ fn getLineString(di: DwarfInfo, offset: u64) ![]const u8 {
+ return getStringGeneric(di.debug_line_str, offset);
}
};
@@ -1166,3 +1372,24 @@ pub fn openDwarfDebugInfo(di: *DwarfInfo, allocator: mem.Allocator) !void {
try di.scanAllFunctions(allocator);
try di.scanAllCompileUnits(allocator);
}
+
+/// This function exists to make it handy, while working on this file, to
+/// turn the error return into a crash by uncommenting the abort below.
+fn badDwarf() error{InvalidDebugInfo} {
+ //std.os.abort(); // can be handy to uncomment when working on this file
+ return error.InvalidDebugInfo;
+}
+
+fn missingDwarf() error{MissingDebugInfo} {
+ //std.os.abort(); // can be handy to uncomment when working on this file
+ return error.MissingDebugInfo;
+}
+
+fn getStringGeneric(opt_str: ?[]const u8, offset: u64) ![:0]const u8 {
+ const str = opt_str orelse return badDwarf();
+ if (offset > str.len) return badDwarf();
+ const casted_offset = math.cast(usize, offset) orelse return badDwarf();
+ // Valid strings always have a terminating zero byte
+ const last = mem.indexOfScalarPos(u8, str, casted_offset, 0) orelse return badDwarf();
+ return str[casted_offset..last :0];
+}
diff --git a/lib/std/elf.zig b/lib/std/elf.zig
index d80ae2f6a0..4a9b7a498f 100644
--- a/lib/std/elf.zig
+++ b/lib/std/elf.zig
@@ -3,7 +3,7 @@ const io = std.io;
const os = std.os;
const math = std.math;
const mem = std.mem;
-const debug = std.debug;
+const assert = std.debug.assert;
const File = std.fs.File;
const native_endian = @import("builtin").target.cpu.arch.endian();
@@ -387,7 +387,7 @@ pub const Header = struct {
const machine = if (need_bswap) blk: {
const value = @enumToInt(hdr32.e_machine);
- break :blk @intToEnum(EM, @byteSwap(@TypeOf(value), value));
+ break :blk @intToEnum(EM, @byteSwap(value));
} else hdr32.e_machine;
return @as(Header, .{
@@ -406,7 +406,7 @@ pub const Header = struct {
}
};
-pub fn ProgramHeaderIterator(ParseSource: anytype) type {
+pub fn ProgramHeaderIterator(comptime ParseSource: anytype) type {
return struct {
elf_header: Header,
parse_source: ParseSource,
@@ -456,7 +456,7 @@ pub fn ProgramHeaderIterator(ParseSource: anytype) type {
};
}
-pub fn SectionHeaderIterator(ParseSource: anytype) type {
+pub fn SectionHeaderIterator(comptime ParseSource: anytype) type {
return struct {
elf_header: Header,
parse_source: ParseSource,
@@ -511,7 +511,7 @@ pub fn SectionHeaderIterator(ParseSource: anytype) type {
pub fn int(is_64: bool, need_bswap: bool, int_32: anytype, int_64: anytype) @TypeOf(int_64) {
if (is_64) {
if (need_bswap) {
- return @byteSwap(@TypeOf(int_64), int_64);
+ return @byteSwap(int_64);
} else {
return int_64;
}
@@ -522,7 +522,7 @@ pub fn int(is_64: bool, need_bswap: bool, int_32: anytype, int_64: anytype) @Typ
pub fn int32(need_bswap: bool, int_32: anytype, comptime Int64: anytype) Int64 {
if (need_bswap) {
- return @byteSwap(@TypeOf(int_32), int_32);
+ return @byteSwap(int_32);
} else {
return int_32;
}
@@ -872,14 +872,14 @@ pub const Elf_MIPS_ABIFlags_v0 = extern struct {
};
comptime {
- debug.assert(@sizeOf(Elf32_Ehdr) == 52);
- debug.assert(@sizeOf(Elf64_Ehdr) == 64);
+ assert(@sizeOf(Elf32_Ehdr) == 52);
+ assert(@sizeOf(Elf64_Ehdr) == 64);
- debug.assert(@sizeOf(Elf32_Phdr) == 32);
- debug.assert(@sizeOf(Elf64_Phdr) == 56);
+ assert(@sizeOf(Elf32_Phdr) == 32);
+ assert(@sizeOf(Elf64_Phdr) == 56);
- debug.assert(@sizeOf(Elf32_Shdr) == 40);
- debug.assert(@sizeOf(Elf64_Shdr) == 64);
+ assert(@sizeOf(Elf32_Shdr) == 40);
+ assert(@sizeOf(Elf64_Shdr) == 64);
}
pub const Auxv = switch (@sizeOf(usize)) {
diff --git a/lib/std/enums.zig b/lib/std/enums.zig
index 31bc367e64..08781767de 100644
--- a/lib/std/enums.zig
+++ b/lib/std/enums.zig
@@ -57,7 +57,7 @@ pub fn values(comptime E: type) []const E {
/// the total number of items which have no matching enum key (holes in the enum
/// numbering). So for example, if an enum has values 1, 2, 5, and 6, max_unused_slots
/// must be at least 3, to allow unused slots 0, 3, and 4.
-fn directEnumArrayLen(comptime E: type, comptime max_unused_slots: comptime_int) comptime_int {
+pub fn directEnumArrayLen(comptime E: type, comptime max_unused_slots: comptime_int) comptime_int {
var max_value: comptime_int = -1;
const max_usize: comptime_int = ~@as(usize, 0);
const fields = std.meta.fields(E);
diff --git a/lib/std/event/channel.zig b/lib/std/event/channel.zig
index fb478ed6ed..e1c147d25a 100644
--- a/lib/std/event/channel.zig
+++ b/lib/std/event/channel.zig
@@ -56,7 +56,7 @@ pub fn Channel(comptime T: type) type {
pub fn init(self: *SelfChannel, buffer: []T) void {
// The ring buffer implementation only works with power of 2 buffer sizes
// because of relying on subtracting across zero. For example (0 -% 1) % 10 == 5
- assert(buffer.len == 0 or @popCount(usize, buffer.len) == 1);
+ assert(buffer.len == 0 or @popCount(buffer.len) == 1);
self.* = SelfChannel{
.buffer_len = 0,
diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig
index a1269ad252..9133477017 100644
--- a/lib/std/fmt.zig
+++ b/lib/std/fmt.zig
@@ -195,7 +195,7 @@ pub fn format(
}
if (comptime arg_state.hasUnusedArgs()) {
- const missing_count = arg_state.args_len - @popCount(ArgSetType, arg_state.used_args);
+ const missing_count = arg_state.args_len - @popCount(arg_state.used_args);
switch (missing_count) {
0 => unreachable,
1 => @compileError("unused argument in '" ++ fmt ++ "'"),
@@ -380,7 +380,7 @@ const ArgState = struct {
args_len: usize,
fn hasUnusedArgs(self: *@This()) bool {
- return @popCount(ArgSetType, self.used_args) != self.args_len;
+ return @popCount(self.used_args) != self.args_len;
}
fn nextArg(self: *@This(), arg_index: ?usize) ?usize {
diff --git a/lib/std/fmt/parse_float/convert_eisel_lemire.zig b/lib/std/fmt/parse_float/convert_eisel_lemire.zig
index 724be45181..ff71695303 100644
--- a/lib/std/fmt/parse_float/convert_eisel_lemire.zig
+++ b/lib/std/fmt/parse_float/convert_eisel_lemire.zig
@@ -36,7 +36,7 @@ pub fn convertEiselLemire(comptime T: type, q: i64, w_: u64) ?BiasedFp(f64) {
}
// Normalize our significant digits, so the most-significant bit is set.
- const lz = @clz(u64, @bitCast(u64, w));
+ const lz = @clz(@bitCast(u64, w));
w = math.shl(u64, w, lz);
const r = computeProductApprox(q, w, float_info.mantissa_explicit_bits + 3);
diff --git a/lib/std/fs.zig b/lib/std/fs.zig
index 0968e16812..b1e88d2e01 100644
--- a/lib/std/fs.zig
+++ b/lib/std/fs.zig
@@ -877,8 +877,9 @@ pub const IterableDir = struct {
/// a reference to the path.
pub fn next(self: *Walker) !?WalkerEntry {
while (self.stack.items.len != 0) {
- // `top` becomes invalid after appending to `self.stack`
+ // `top` and `containing` become invalid after appending to `self.stack`
var top = &self.stack.items[self.stack.items.len - 1];
+ var containing = top;
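+ // `containing` tracks the directory that holds the returned entry; it
+ // differs from `top` once we descend into a child directory below.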
var dirname_len = top.dirname_len;
if (try top.iter.next()) |base| {
self.name_buffer.shrinkRetainingCapacity(dirname_len);
@@ -899,10 +900,11 @@ pub const IterableDir = struct {
.dirname_len = self.name_buffer.items.len,
});
top = &self.stack.items[self.stack.items.len - 1];
+ containing = &self.stack.items[self.stack.items.len - 2];
}
}
return WalkerEntry{
- .dir = top.iter.dir,
+ .dir = containing.iter.dir,
.basename = self.name_buffer.items[dirname_len..],
.path = self.name_buffer.items,
.kind = base.kind,
diff --git a/lib/std/fs/path.zig b/lib/std/fs/path.zig
index d5583dcc80..9dc3367688 100644
--- a/lib/std/fs/path.zig
+++ b/lib/std/fs/path.zig
@@ -42,7 +42,7 @@ pub fn isSep(byte: u8) bool {
/// This is different from mem.join in that the separator will not be repeated if
/// it is found at the end or beginning of a pair of consecutive paths.
-fn joinSepMaybeZ(allocator: Allocator, separator: u8, sepPredicate: fn (u8) bool, paths: []const []const u8, zero: bool) ![]u8 {
+fn joinSepMaybeZ(allocator: Allocator, separator: u8, comptime sepPredicate: fn (u8) bool, paths: []const []const u8, zero: bool) ![]u8 {
if (paths.len == 0) return if (zero) try allocator.dupe(u8, &[1]u8{0}) else &[0]u8{};
// Find first non-empty path index.
diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig
index 538ce1bf5e..a7686080c1 100644
--- a/lib/std/fs/test.zig
+++ b/lib/std/fs/test.zig
@@ -1058,6 +1058,9 @@ test "walker" {
std.debug.print("found unexpected path: {s}\n", .{std.fmt.fmtSliceEscapeLower(entry.path)});
return err;
};
+ // make sure that the entry.dir is the containing dir
+ var entry_dir = try entry.dir.openDir(entry.basename, .{});
+ defer entry_dir.close();
num_walked += 1;
}
try testing.expectEqual(expected_paths.kvs.len, num_walked);
diff --git a/lib/std/hash/auto_hash.zig b/lib/std/hash/auto_hash.zig
index c4060e9182..32f4f4378f 100644
--- a/lib/std/hash/auto_hash.zig
+++ b/lib/std/hash/auto_hash.zig
@@ -30,13 +30,15 @@ pub fn hashPointer(hasher: anytype, key: anytype, comptime strat: HashStrategy)
.DeepRecursive => hash(hasher, key.*, .DeepRecursive),
},
- .Slice => switch (strat) {
- .Shallow => {
- hashPointer(hasher, key.ptr, .Shallow);
- hash(hasher, key.len, .Shallow);
- },
- .Deep => hashArray(hasher, key, .Shallow),
- .DeepRecursive => hashArray(hasher, key, .DeepRecursive),
+ .Slice => {
+ switch (strat) {
+ .Shallow => {
+ hashPointer(hasher, key.ptr, .Shallow);
+ },
+ .Deep => hashArray(hasher, key, .Shallow),
+ .DeepRecursive => hashArray(hasher, key, .DeepRecursive),
+ }
+ hash(hasher, key.len, .Shallow);
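+ // Hashing the length under every strategy keeps slices that split the
+ // same elements differently from colliding (see the multi-dimensional
+ // array test below).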
},
.Many,
@@ -53,17 +55,8 @@ pub fn hashPointer(hasher: anytype, key: anytype, comptime strat: HashStrategy)
/// Helper function to hash a set of contiguous objects, from an array or slice.
pub fn hashArray(hasher: anytype, key: anytype, comptime strat: HashStrategy) void {
- switch (strat) {
- .Shallow => {
- for (key) |element| {
- hash(hasher, element, .Shallow);
- }
- },
- else => {
- for (key) |element| {
- hash(hasher, element, strat);
- }
- },
+ for (key) |element| {
+ hash(hasher, element, strat);
}
}
@@ -193,8 +186,8 @@ fn typeContainsSlice(comptime K: type) bool {
pub fn autoHash(hasher: anytype, key: anytype) void {
const Key = @TypeOf(key);
if (comptime typeContainsSlice(Key)) {
- @compileError("std.auto_hash.autoHash does not allow slices as well as unions and structs containing slices here (" ++ @typeName(Key) ++
- ") because the intent is unclear. Consider using std.auto_hash.hash or providing your own hash function instead.");
+ @compileError("std.hash.autoHash does not allow slices as well as unions and structs containing slices here (" ++ @typeName(Key) ++
+ ") because the intent is unclear. Consider using std.hash.autoHashStrat or providing your own hash function instead.");
}
hash(hasher, key, .Shallow);
@@ -359,6 +352,12 @@ test "testHash array" {
try testing.expectEqual(h, hasher.final());
}
+test "testHash multi-dimensional array" {
+ const a = [_][]const u32{ &.{ 1, 2, 3 }, &.{ 4, 5 } };
+ const b = [_][]const u32{ &.{ 1, 2 }, &.{ 3, 4, 5 } };
+ try testing.expect(testHash(a) != testHash(b));
+}
+
test "testHash struct" {
const Foo = struct {
a: u32 = 1,
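Aside, not part of the patch: the restructured slice case above is a hash-collision fix. With the .Deep strategy the slice length was previously not hashed, so multi-dimensional arrays with the same flattened contents but different row boundaries hashed identically; hoisting hash(hasher, key.len, .Shallow) out of the switch restores the distinction. A minimal sketch of the collision the new test guards against, using the std.hash.autoHashStrat entry point named in the updated error message:

const std = @import("std");

// Illustrative helper: Wyhash with the Deep strategy so slice
// contents are followed rather than hashing ptr/len shallowly.
fn deepHash(key: anytype) u64 {
    var hasher = std.hash.Wyhash.init(0);
    std.hash.autoHashStrat(&hasher, key, .Deep);
    return hasher.final();
}

test "row boundaries affect the hash" {
    const a = [_][]const u32{ &.{ 1, 2, 3 }, &.{ 4, 5 } };
    const b = [_][]const u32{ &.{ 1, 2 }, &.{ 3, 4, 5 } };
    // Same flattened contents, different structure: with key.len
    // hashed for every strategy, these no longer collide.
    try std.testing.expect(deepHash(a) != deepHash(b));
}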
diff --git a/lib/std/hash/cityhash.zig b/lib/std/hash/cityhash.zig
index 8a535f372e..7c8e42f105 100644
--- a/lib/std/hash/cityhash.zig
+++ b/lib/std/hash/cityhash.zig
@@ -143,9 +143,9 @@ pub const CityHash32 = struct {
h = rotr32(h, 19);
h = h *% 5 +% 0xe6546b64;
g ^= b4;
- g = @byteSwap(u32, g) *% 5;
+ g = @byteSwap(g) *% 5;
h +%= b4 *% 5;
- h = @byteSwap(u32, h);
+ h = @byteSwap(h);
f +%= b0;
const t: u32 = h;
h = f;
@@ -252,11 +252,11 @@ pub const CityHash64 = struct {
const u: u64 = rotr64(a +% g, 43) +% (rotr64(b, 30) +% c) *% 9;
const v: u64 = ((a +% g) ^ d) +% f +% 1;
- const w: u64 = @byteSwap(u64, (u +% v) *% mul) +% h;
+ const w: u64 = @byteSwap((u +% v) *% mul) +% h;
const x: u64 = rotr64(e +% f, 42) +% c;
- const y: u64 = (@byteSwap(u64, (v +% w) *% mul) +% g) *% mul;
+ const y: u64 = (@byteSwap((v +% w) *% mul) +% g) *% mul;
const z: u64 = e +% f +% c;
- const a1: u64 = @byteSwap(u64, (x +% z) *% mul +% y) +% b;
+ const a1: u64 = @byteSwap((x +% z) *% mul +% y) +% b;
const b1: u64 = shiftmix((z +% a1) *% mul +% d +% h) *% mul;
return b1 +% x;
}
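Aside, not part of the patch: the hash-function edits in this and the following files are all instances of one mechanical language change: @byteSwap, like @clz, @ctz and @popCount elsewhere in this diff, no longer takes the operand type as a first argument; the type is inferred from the operand. A minimal before/after sketch:

const std = @import("std");

test "bit builtins now infer the operand type" {
    const value: u32 = 0x11223344;
    // Before this change: @byteSwap(u32, value).
    try std.testing.expectEqual(@as(u32, 0x44332211), @byteSwap(value));
    // The same single-argument form applies to @clz, @ctz and @popCount.
    try std.testing.expectEqual(@as(u8, 3), @popCount(@as(u8, 0b0000_0111)));
}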
diff --git a/lib/std/hash/murmur.zig b/lib/std/hash/murmur.zig
index adb150446d..44b411bb2c 100644
--- a/lib/std/hash/murmur.zig
+++ b/lib/std/hash/murmur.zig
@@ -19,7 +19,7 @@ pub const Murmur2_32 = struct {
for (@ptrCast([*]align(1) const u32, str.ptr)[0..(len >> 2)]) |v| {
var k1: u32 = v;
if (native_endian == .Big)
- k1 = @byteSwap(u32, k1);
+ k1 = @byteSwap(k1);
k1 *%= m;
k1 ^= k1 >> 24;
k1 *%= m;
@@ -104,7 +104,7 @@ pub const Murmur2_64 = struct {
for (@ptrCast([*]align(1) const u64, str.ptr)[0..@intCast(usize, len >> 3)]) |v| {
var k1: u64 = v;
if (native_endian == .Big)
- k1 = @byteSwap(u64, k1);
+ k1 = @byteSwap(k1);
k1 *%= m;
k1 ^= k1 >> 47;
k1 *%= m;
@@ -117,7 +117,7 @@ pub const Murmur2_64 = struct {
var k1: u64 = 0;
@memcpy(@ptrCast([*]u8, &k1), @ptrCast([*]const u8, &str[@intCast(usize, offset)]), @intCast(usize, rest));
if (native_endian == .Big)
- k1 = @byteSwap(u64, k1);
+ k1 = @byteSwap(k1);
h1 ^= k1;
h1 *%= m;
}
@@ -184,7 +184,7 @@ pub const Murmur3_32 = struct {
for (@ptrCast([*]align(1) const u32, str.ptr)[0..(len >> 2)]) |v| {
var k1: u32 = v;
if (native_endian == .Big)
- k1 = @byteSwap(u32, k1);
+ k1 = @byteSwap(k1);
k1 *%= c1;
k1 = rotl32(k1, 15);
k1 *%= c2;
@@ -296,7 +296,7 @@ fn SMHasherTest(comptime hash_fn: anytype, comptime hashbits: u32) u32 {
var h = hash_fn(key[0..i], 256 - i);
if (native_endian == .Big)
- h = @byteSwap(@TypeOf(h), h);
+ h = @byteSwap(h);
@memcpy(@ptrCast([*]u8, &hashes[i * hashbytes]), @ptrCast([*]u8, &h), hashbytes);
}
@@ -310,8 +310,8 @@ test "murmur2_32" {
var v0le: u32 = v0;
var v1le: u64 = v1;
if (native_endian == .Big) {
- v0le = @byteSwap(u32, v0le);
- v1le = @byteSwap(u64, v1le);
+ v0le = @byteSwap(v0le);
+ v1le = @byteSwap(v1le);
}
try testing.expectEqual(Murmur2_32.hash(@ptrCast([*]u8, &v0le)[0..4]), Murmur2_32.hashUint32(v0));
try testing.expectEqual(Murmur2_32.hash(@ptrCast([*]u8, &v1le)[0..8]), Murmur2_32.hashUint64(v1));
@@ -324,8 +324,8 @@ test "murmur2_64" {
var v0le: u32 = v0;
var v1le: u64 = v1;
if (native_endian == .Big) {
- v0le = @byteSwap(u32, v0le);
- v1le = @byteSwap(u64, v1le);
+ v0le = @byteSwap(v0le);
+ v1le = @byteSwap(v1le);
}
try testing.expectEqual(Murmur2_64.hash(@ptrCast([*]u8, &v0le)[0..4]), Murmur2_64.hashUint32(v0));
try testing.expectEqual(Murmur2_64.hash(@ptrCast([*]u8, &v1le)[0..8]), Murmur2_64.hashUint64(v1));
@@ -338,8 +338,8 @@ test "murmur3_32" {
var v0le: u32 = v0;
var v1le: u64 = v1;
if (native_endian == .Big) {
- v0le = @byteSwap(u32, v0le);
- v1le = @byteSwap(u64, v1le);
+ v0le = @byteSwap(v0le);
+ v1le = @byteSwap(v1le);
}
try testing.expectEqual(Murmur3_32.hash(@ptrCast([*]u8, &v0le)[0..4]), Murmur3_32.hashUint32(v0));
try testing.expectEqual(Murmur3_32.hash(@ptrCast([*]u8, &v1le)[0..8]), Murmur3_32.hashUint64(v1));
diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index b71cdb7932..e0e8cf51cc 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -479,7 +479,7 @@ const WasmPageAllocator = struct {
@setCold(true);
for (self.data) |segment, i| {
const spills_into_next = @bitCast(i128, segment) < 0;
- const has_enough_bits = @popCount(u128, segment) >= num_pages;
+ const has_enough_bits = @popCount(segment) >= num_pages;
if (!spills_into_next and !has_enough_bits) continue;
@@ -1185,7 +1185,7 @@ pub fn testAllocatorLargeAlignment(base_allocator: mem.Allocator) !void {
const large_align = @as(u29, mem.page_size << 2);
var align_mask: usize = undefined;
- _ = @shlWithOverflow(usize, ~@as(usize, 0), @as(USizeShift, @ctz(u29, large_align)), &align_mask);
+ _ = @shlWithOverflow(usize, ~@as(usize, 0), @as(USizeShift, @ctz(large_align)), &align_mask);
var slice = try allocator.alignedAlloc(u8, large_align, 500);
try testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));
diff --git a/lib/std/io/bit_reader.zig b/lib/std/io/bit_reader.zig
index 15262f67a2..e897850b83 100644
--- a/lib/std/io/bit_reader.zig
+++ b/lib/std/io/bit_reader.zig
@@ -7,7 +7,7 @@ const meta = std.meta;
const math = std.math;
/// Creates a stream which allows for reading bit fields from another stream
-pub fn BitReader(endian: std.builtin.Endian, comptime ReaderType: type) type {
+pub fn BitReader(comptime endian: std.builtin.Endian, comptime ReaderType: type) type {
return struct {
forward_reader: ReaderType,
bit_buffer: u7,
diff --git a/lib/std/io/bit_writer.zig b/lib/std/io/bit_writer.zig
index d71afe5fe5..0be2e7ab08 100644
--- a/lib/std/io/bit_writer.zig
+++ b/lib/std/io/bit_writer.zig
@@ -7,7 +7,7 @@ const meta = std.meta;
const math = std.math;
/// Creates a stream which allows for writing bit fields to another stream
-pub fn BitWriter(endian: std.builtin.Endian, comptime WriterType: type) type {
+pub fn BitWriter(comptime endian: std.builtin.Endian, comptime WriterType: type) type {
return struct {
forward_writer: WriterType,
bit_buffer: u8,
diff --git a/lib/std/io/reader.zig b/lib/std/io/reader.zig
index 16acef8e48..6acc004851 100644
--- a/lib/std/io/reader.zig
+++ b/lib/std/io/reader.zig
@@ -247,6 +247,27 @@ pub fn Reader(
return bytes;
}
+ /// Reads bytes into the bounded array, until
+ /// the bounded array is full, or the stream ends.
+ pub fn readIntoBoundedBytes(
+ self: Self,
+ comptime num_bytes: usize,
+ bounded: *std.BoundedArray(u8, num_bytes),
+ ) !void {
+ while (bounded.len < num_bytes) {
+ const bytes_read = try self.read(bounded.unusedCapacitySlice());
+ if (bytes_read == 0) return;
+ bounded.len += bytes_read;
+ }
+ }
+
+ /// Reads at most `num_bytes` and returns as a bounded array.
+ pub fn readBoundedBytes(self: Self, comptime num_bytes: usize) !std.BoundedArray(u8, num_bytes) {
+ var result = std.BoundedArray(u8, num_bytes){};
+ try self.readIntoBoundedBytes(num_bytes, &result);
+ return result;
+ }
+
/// Reads a native-endian integer
pub fn readIntNative(self: Self, comptime T: type) !T {
const bytes = try self.readBytesNoEof((@typeInfo(T).Int.bits + 7) / 8);
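Aside, not part of the patch: a minimal usage sketch for the two new Reader helpers above; the stream contents are illustrative:

const std = @import("std");

test "readBoundedBytes reads at most num_bytes" {
    var fbs = std.io.fixedBufferStream("hello world");
    const reader = fbs.reader();
    // Ask for up to 8 bytes; a shorter stream simply yields fewer.
    const bounded = try reader.readBoundedBytes(8);
    try std.testing.expectEqualStrings("hello wo", bounded.constSlice());
}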
diff --git a/lib/std/leb128.zig b/lib/std/leb128.zig
index b18ca9fab0..8e3a007849 100644
--- a/lib/std/leb128.zig
+++ b/lib/std/leb128.zig
@@ -317,7 +317,7 @@ fn test_write_leb128(value: anytype) !void {
const bytes_needed = bn: {
if (@typeInfo(T).Int.bits <= 7) break :bn @as(u16, 1);
- const unused_bits = if (value < 0) @clz(T, ~value) else @clz(T, value);
+ const unused_bits = if (value < 0) @clz(~value) else @clz(value);
const used_bits: u16 = (@typeInfo(T).Int.bits - unused_bits) + @boolToInt(t_signed);
if (used_bits <= 7) break :bn @as(u16, 1);
break :bn ((used_bits + 6) / 7);
diff --git a/lib/std/math.zig b/lib/std/math.zig
index 40b5eb9204..d1c9326ae2 100644
--- a/lib/std/math.zig
+++ b/lib/std/math.zig
@@ -1146,7 +1146,7 @@ pub fn ceilPowerOfTwoPromote(comptime T: type, value: T) std.meta.Int(@typeInfo(
assert(value != 0);
const PromotedType = std.meta.Int(@typeInfo(T).Int.signedness, @typeInfo(T).Int.bits + 1);
const ShiftType = std.math.Log2Int(PromotedType);
- return @as(PromotedType, 1) << @intCast(ShiftType, @typeInfo(T).Int.bits - @clz(T, value - 1));
+ return @as(PromotedType, 1) << @intCast(ShiftType, @typeInfo(T).Int.bits - @clz(value - 1));
}
/// Returns the next power of two (if the value is not already a power of two).
@@ -1212,7 +1212,7 @@ pub fn log2_int(comptime T: type, x: T) Log2Int(T) {
if (@typeInfo(T) != .Int or @typeInfo(T).Int.signedness != .unsigned)
@compileError("log2_int requires an unsigned integer, found " ++ @typeName(T));
assert(x != 0);
- return @intCast(Log2Int(T), @typeInfo(T).Int.bits - 1 - @clz(T, x));
+ return @intCast(Log2Int(T), @typeInfo(T).Int.bits - 1 - @clz(x));
}
/// Return the log base 2 of integer value x, rounding up to the
@@ -1548,7 +1548,7 @@ test "boolMask" {
}
/// Return the mod of `num` with the smallest integer type
-pub fn comptimeMod(num: anytype, denom: comptime_int) IntFittingRange(0, denom - 1) {
+pub fn comptimeMod(num: anytype, comptime denom: comptime_int) IntFittingRange(0, denom - 1) {
return @intCast(IntFittingRange(0, denom - 1), @mod(num, denom));
}
diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig
index 2a141ac243..ee6ccaacac 100644
--- a/lib/std/math/big/int.zig
+++ b/lib/std/math/big/int.zig
@@ -887,7 +887,7 @@ pub const Mutable = struct {
var sum: Limb = 0;
for (r.limbs[0..r.len]) |limb| {
- sum += @popCount(Limb, limb);
+ sum += @popCount(limb);
}
r.set(sum);
}
@@ -1520,7 +1520,7 @@ pub const Mutable = struct {
) void {
// 0.
// Normalize so that y[t] > b/2
- const lz = @clz(Limb, y.limbs[y.len - 1]);
+ const lz = @clz(y.limbs[y.len - 1]);
const norm_shift = if (lz == 0 and y.toConst().isOdd())
limb_bits // Force an extra limb so that y is even.
else
@@ -1917,7 +1917,7 @@ pub const Const = struct {
/// Returns the number of bits required to represent the absolute value of an integer.
pub fn bitCountAbs(self: Const) usize {
- return (self.limbs.len - 1) * limb_bits + (limb_bits - @clz(Limb, self.limbs[self.limbs.len - 1]));
+ return (self.limbs.len - 1) * limb_bits + (limb_bits - @clz(self.limbs[self.limbs.len - 1]));
}
/// Returns the number of bits required to represent the integer in twos-complement form.
@@ -1936,9 +1936,9 @@ pub const Const = struct {
if (!self.positive) block: {
bits += 1;
- if (@popCount(Limb, self.limbs[self.limbs.len - 1]) == 1) {
+ if (@popCount(self.limbs[self.limbs.len - 1]) == 1) {
for (self.limbs[0 .. self.limbs.len - 1]) |limb| {
- if (@popCount(Limb, limb) != 0) {
+ if (@popCount(limb) != 0) {
break :block;
}
}
@@ -3895,8 +3895,8 @@ fn llpow(r: []Limb, a: []const Limb, b: u32, tmp_limbs: []Limb) void {
// The initial assignment makes the result end in `r` so an extra memory
// copy is saved, each 1 flips the index twice so it's only the zeros that
// matter.
- const b_leading_zeros = @clz(u32, b);
- const exp_zeros = @popCount(u32, ~b) - b_leading_zeros;
+ const b_leading_zeros = @clz(b);
+ const exp_zeros = @popCount(~b) - b_leading_zeros;
if (exp_zeros & 1 != 0) {
tmp1 = tmp_limbs;
tmp2 = r;
diff --git a/lib/std/math/float.zig b/lib/std/math/float.zig
index 30e456fcbd..768cc03285 100644
--- a/lib/std/math/float.zig
+++ b/lib/std/math/float.zig
@@ -8,7 +8,7 @@ inline fn mantissaOne(comptime T: type) comptime_int {
}
/// Creates floating point type T from an unbiased exponent and raw mantissa.
-inline fn reconstructFloat(comptime T: type, exponent: comptime_int, mantissa: comptime_int) T {
+inline fn reconstructFloat(comptime T: type, comptime exponent: comptime_int, comptime mantissa: comptime_int) T {
const TBits = @Type(.{ .Int = .{ .signedness = .unsigned, .bits = @bitSizeOf(T) } });
const biased_exponent = @as(TBits, exponent + floatExponentMax(T));
return @bitCast(T, (biased_exponent << floatMantissaBits(T)) | @as(TBits, mantissa));
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index 5decb88ff3..b7f95e6366 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -267,7 +267,7 @@ pub fn zeroes(comptime T: type) T {
return null;
},
.Struct => |struct_info| {
- if (@sizeOf(T) == 0) return T{};
+ if (@sizeOf(T) == 0) return undefined;
if (struct_info.layout == .Extern) {
var item: T = undefined;
set(u8, asBytes(&item), 0);
@@ -424,6 +424,9 @@ test "zeroes" {
comptime var comptime_union = zeroes(C_union);
try testing.expectEqual(@as(u8, 0), comptime_union.a);
+
+ // Ensure zero sized struct with fields is initialized correctly.
+ _ = zeroes(struct { handle: void });
}
/// Initializes all fields of the struct with their default value, or zero values if no default value is present.
@@ -1316,7 +1319,7 @@ pub fn readIntNative(comptime T: type, bytes: *const [@divExact(@typeInfo(T).Int
/// This function cannot fail and cannot cause undefined behavior.
/// Assumes the endianness of memory is foreign, so it must byte-swap.
pub fn readIntForeign(comptime T: type, bytes: *const [@divExact(@typeInfo(T).Int.bits, 8)]u8) T {
- return @byteSwap(T, readIntNative(T, bytes));
+ return @byteSwap(readIntNative(T, bytes));
}
pub const readIntLittle = switch (native_endian) {
@@ -1345,7 +1348,7 @@ pub fn readIntSliceNative(comptime T: type, bytes: []const u8) T {
/// The bit count of T must be evenly divisible by 8.
/// Assumes the endianness of memory is foreign, so it must byte-swap.
pub fn readIntSliceForeign(comptime T: type, bytes: []const u8) T {
- return @byteSwap(T, readIntSliceNative(T, bytes));
+ return @byteSwap(readIntSliceNative(T, bytes));
}
pub const readIntSliceLittle = switch (native_endian) {
@@ -1427,7 +1430,7 @@ pub fn writeIntNative(comptime T: type, buf: *[(@typeInfo(T).Int.bits + 7) / 8]u
/// the integer bit width must be divisible by 8.
/// This function stores in foreign endian, which means it does a @byteSwap first.
pub fn writeIntForeign(comptime T: type, buf: *[@divExact(@typeInfo(T).Int.bits, 8)]u8, value: T) void {
- writeIntNative(T, buf, @byteSwap(T, value));
+ writeIntNative(T, buf, @byteSwap(value));
}
pub const writeIntLittle = switch (native_endian) {
@@ -1572,7 +1575,7 @@ pub const bswapAllFields = @compileError("bswapAllFields has been renamed to byt
pub fn byteSwapAllFields(comptime S: type, ptr: *S) void {
if (@typeInfo(S) != .Struct) @compileError("byteSwapAllFields expects a struct as the first argument");
inline for (std.meta.fields(S)) |f| {
- @field(ptr, f.name) = @byteSwap(f.field_type, @field(ptr, f.name));
+ @field(ptr, f.name) = @byteSwap(@field(ptr, f.name));
}
}
@@ -2749,14 +2752,14 @@ test "replaceOwned" {
pub fn littleToNative(comptime T: type, x: T) T {
return switch (native_endian) {
.Little => x,
- .Big => @byteSwap(T, x),
+ .Big => @byteSwap(x),
};
}
/// Converts a big-endian integer to host endianness.
pub fn bigToNative(comptime T: type, x: T) T {
return switch (native_endian) {
- .Little => @byteSwap(T, x),
+ .Little => @byteSwap(x),
.Big => x,
};
}
@@ -2781,14 +2784,14 @@ pub fn nativeTo(comptime T: type, x: T, desired_endianness: Endian) T {
pub fn nativeToLittle(comptime T: type, x: T) T {
return switch (native_endian) {
.Little => x,
- .Big => @byteSwap(T, x),
+ .Big => @byteSwap(x),
};
}
/// Converts an integer which has host endianness to big endian.
pub fn nativeToBig(comptime T: type, x: T) T {
return switch (native_endian) {
- .Little => @byteSwap(T, x),
+ .Little => @byteSwap(x),
.Big => x,
};
}
@@ -2800,7 +2803,7 @@ pub fn nativeToBig(comptime T: type, x: T) T {
/// - The delta required to align the pointer is not a multiple of the pointee's
/// type.
pub fn alignPointerOffset(ptr: anytype, align_to: u29) ?usize {
- assert(align_to != 0 and @popCount(u29, align_to) == 1);
+ assert(align_to != 0 and @popCount(align_to) == 1);
const T = @TypeOf(ptr);
const info = @typeInfo(T);
@@ -3249,13 +3252,13 @@ test "sliceAsBytes preserves pointer attributes" {
try testing.expectEqual(in.alignment, out.alignment);
}
-/// Round an address up to the nearest aligned address
+/// Round an address up to the next (or current) aligned address.
/// The alignment must be a power of 2 and greater than 0.
pub fn alignForward(addr: usize, alignment: usize) usize {
return alignForwardGeneric(usize, addr, alignment);
}
-/// Round an address up to the nearest aligned address
+/// Round an address up to the next (or current) aligned address.
/// The alignment must be a power of 2 and greater than 0.
pub fn alignForwardGeneric(comptime T: type, addr: T, alignment: T) T {
return alignBackwardGeneric(T, addr + (alignment - 1), alignment);
@@ -3287,25 +3290,25 @@ test "alignForward" {
try testing.expect(alignForward(17, 8) == 24);
}
-/// Round an address up to the previous aligned address
+/// Round an address down to the previous (or current) aligned address.
/// Unlike `alignBackward`, `alignment` can be any positive number, not just a power of 2.
pub fn alignBackwardAnyAlign(i: usize, alignment: usize) usize {
- if (@popCount(usize, alignment) == 1)
+ if (@popCount(alignment) == 1)
return alignBackward(i, alignment);
assert(alignment != 0);
return i - @mod(i, alignment);
}
-/// Round an address up to the previous aligned address
+/// Round an address down to the previous (or current) aligned address.
/// The alignment must be a power of 2 and greater than 0.
pub fn alignBackward(addr: usize, alignment: usize) usize {
return alignBackwardGeneric(usize, addr, alignment);
}
-/// Round an address up to the previous aligned address
+/// Round an address down to the previous (or current) aligned address.
/// The alignment must be a power of 2 and greater than 0.
pub fn alignBackwardGeneric(comptime T: type, addr: T, alignment: T) T {
- assert(@popCount(T, alignment) == 1);
+ assert(@popCount(alignment) == 1);
// 000010000 // example alignment
// 000001111 // subtract 1
// 111110000 // binary not
@@ -3315,11 +3318,11 @@ pub fn alignBackwardGeneric(comptime T: type, addr: T, alignment: T) T {
/// Returns whether `alignment` is a valid alignment, meaning it is
/// a positive power of 2.
pub fn isValidAlign(alignment: u29) bool {
- return @popCount(u29, alignment) == 1;
+ return @popCount(alignment) == 1;
}
pub fn isAlignedAnyAlign(i: usize, alignment: usize) bool {
- if (@popCount(usize, alignment) == 1)
+ if (@popCount(alignment) == 1)
return isAligned(i, alignment);
assert(alignment != 0);
return 0 == @mod(i, alignment);
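Aside, not part of the patch: the doc-comment corrections above pin down the boundary behavior, namely that an already-aligned address is returned unchanged in both directions. A small sketch:

const std = @import("std");

test "aligned addresses are returned unchanged" {
    // "next (or current)": 16 is already 8-aligned.
    try std.testing.expectEqual(@as(usize, 16), std.mem.alignForward(16, 8));
    try std.testing.expectEqual(@as(usize, 24), std.mem.alignForward(17, 8));
    // "previous (or current)" for the backward direction.
    try std.testing.expectEqual(@as(usize, 16), std.mem.alignBackward(16, 8));
    try std.testing.expectEqual(@as(usize, 16), std.mem.alignBackward(23, 8));
}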
diff --git a/lib/std/meta.zig b/lib/std/meta.zig
index c6717ad1c0..7e64eff49f 100644
--- a/lib/std/meta.zig
+++ b/lib/std/meta.zig
@@ -764,7 +764,7 @@ const TagPayloadType = TagPayload;
///Given a tagged union type, and an enum, return the type of the union
/// field corresponding to the enum tag.
-pub fn TagPayload(comptime U: type, tag: Tag(U)) type {
+pub fn TagPayload(comptime U: type, comptime tag: Tag(U)) type {
comptime debug.assert(trait.is(.Union)(U));
const info = @typeInfo(U).Union;
@@ -1024,28 +1024,13 @@ pub fn ArgsTuple(comptime Function: type) type {
if (function_info.is_var_args)
@compileError("Cannot create ArgsTuple for variadic function");
- var argument_field_list: [function_info.args.len]std.builtin.Type.StructField = undefined;
+ var argument_field_list: [function_info.args.len]type = undefined;
inline for (function_info.args) |arg, i| {
const T = arg.arg_type.?;
- @setEvalBranchQuota(10_000);
- var num_buf: [128]u8 = undefined;
- argument_field_list[i] = .{
- .name = std.fmt.bufPrint(&num_buf, "{d}", .{i}) catch unreachable,
- .field_type = T,
- .default_value = null,
- .is_comptime = false,
- .alignment = if (@sizeOf(T) > 0) @alignOf(T) else 0,
- };
+ argument_field_list[i] = T;
}
- return @Type(.{
- .Struct = .{
- .is_tuple = true,
- .layout = .Auto,
- .decls = &.{},
- .fields = &argument_field_list,
- },
- });
+ return CreateUniqueTuple(argument_field_list.len, argument_field_list);
}
/// For a given anonymous list of types, returns a new tuple type
@@ -1056,6 +1041,10 @@ pub fn ArgsTuple(comptime Function: type) type {
/// - `Tuple(&[_]type {f32})` ⇒ `tuple { f32 }`
/// - `Tuple(&[_]type {f32,u32})` ⇒ `tuple { f32, u32 }`
pub fn Tuple(comptime types: []const type) type {
+ return CreateUniqueTuple(types.len, types[0..types.len].*);
+}
+
+fn CreateUniqueTuple(comptime N: comptime_int, comptime types: [N]type) type {
var tuple_fields: [types.len]std.builtin.Type.StructField = undefined;
inline for (types) |T, i| {
@setEvalBranchQuota(10_000);
@@ -1118,6 +1107,32 @@ test "Tuple" {
TupleTester.assertTuple(.{ u32, f16, []const u8, void }, Tuple(&[_]type{ u32, f16, []const u8, void }));
}
+test "Tuple deduplication" {
+ const T1 = std.meta.Tuple(&.{ u32, f32, i8 });
+ const T2 = std.meta.Tuple(&.{ u32, f32, i8 });
+ const T3 = std.meta.Tuple(&.{ u32, f32, i7 });
+
+ if (T1 != T2) {
+ @compileError("std.meta.Tuple doesn't deduplicate tuple types.");
+ }
+ if (T1 == T3) {
+ @compileError("std.meta.Tuple fails to generate different types.");
+ }
+}
+
+test "ArgsTuple forwarding" {
+ const T1 = std.meta.Tuple(&.{ u32, f32, i8 });
+ const T2 = std.meta.ArgsTuple(fn (u32, f32, i8) void);
+ const T3 = std.meta.ArgsTuple(fn (u32, f32, i8) callconv(.C) noreturn);
+
+ if (T1 != T2) {
+ @compileError("std.meta.ArgsTuple produces different types than std.meta.Tuple");
+ }
+ if (T1 != T3) {
+ @compileError("std.meta.ArgsTuple produces different types for the same argument lists.");
+ }
+}
+
/// TODO: https://github.com/ziglang/zig/issues/425
pub fn globalOption(comptime name: []const u8, comptime T: type) ?T {
if (!@hasDecl(root, name))
@@ -1134,3 +1149,27 @@ test "isError" {
try std.testing.expect(isError(math.absInt(@as(i8, -128))));
try std.testing.expect(!isError(math.absInt(@as(i8, -127))));
}
+
+/// This function returns a function pointer for a given function signature.
+/// It's a helper to make code compatible to both stage1 and stage2.
+///
+/// **WARNING:** This function is deprecated and will be removed together with stage1.
+pub fn FnPtr(comptime Fn: type) type {
+ return if (@import("builtin").zig_backend != .stage1)
+ *const Fn
+ else
+ Fn;
+}
+
+test "FnPtr" {
+ var func: FnPtr(fn () i64) = undefined;
+
+ // verify that we can perform runtime exchange
+ // and not have a function body in stage2:
+
+ func = std.time.timestamp;
+ _ = func();
+
+ func = std.time.milliTimestamp;
+ _ = func();
+}
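Aside, not part of the patch: the deduplication works because the compiler memoizes comptime function calls. Both Tuple and ArgsTuple now funnel into CreateUniqueTuple, which takes the type list by value as a fixed-size array, so identical argument lists are structurally equal and yield the identical struct type. A sketch of the observable effect:

const std = @import("std");

test "matching type lists produce the same tuple type" {
    const A = std.meta.Tuple(&.{ u8, bool });
    const B = std.meta.ArgsTuple(fn (u8, bool) void);
    // Both paths reach CreateUniqueTuple with identical comptime
    // arguments, which the compiler memoizes into one struct type.
    try std.testing.expect(A == B);
}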
diff --git a/lib/std/multi_array_list.zig b/lib/std/multi_array_list.zig
index 48dddf636d..11aec707ab 100644
--- a/lib/std/multi_array_list.zig
+++ b/lib/std/multi_array_list.zig
@@ -459,7 +459,7 @@ pub fn MultiArrayList(comptime S: type) type {
return self.bytes[0..capacityInBytes(self.capacity)];
}
- fn FieldType(field: Field) type {
+ fn FieldType(comptime field: Field) type {
return meta.fieldInfo(S, field).field_type;
}
diff --git a/lib/std/os.zig b/lib/std/os.zig
index 1192c72629..59f2a2173f 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -475,10 +475,9 @@ pub fn abort() noreturn {
// Install default handler so that the tkill below will terminate.
const sigact = Sigaction{
- .handler = .{ .sigaction = SIG.DFL },
- .mask = undefined,
- .flags = undefined,
- .restorer = undefined,
+ .handler = .{ .handler = SIG.DFL },
+ .mask = empty_sigset,
+ .flags = 0,
};
sigaction(SIG.ABRT, &sigact, null) catch |err| switch (err) {
error.OperationNotSupported => unreachable,
@@ -953,6 +952,10 @@ pub const WriteError = error{
OperationAborted,
NotOpenForWriting,
+ /// The process cannot access the file because another process has locked
+ /// a portion of the file. Windows-only.
+ LockViolation,
+
/// This error occurs when no global event loop is configured,
/// and reading from the file descriptor would block.
WouldBlock,
@@ -2648,6 +2651,7 @@ pub fn renameatW(
.creation = windows.FILE_OPEN,
.io_mode = .blocking,
.filter = .any, // This function is supposed to rename both files and directories.
+ .follow_symlinks = false,
}) catch |err| switch (err) {
error.WouldBlock => unreachable, // Not possible without `.share_access_nonblocking = true`.
else => |e| return e,
@@ -5443,11 +5447,7 @@ pub fn toPosixPath(file_path: []const u8) ![MAX_PATH_BYTES - 1:0]u8 {
/// if this happens the fix is to add the error code to the corresponding
/// switch expression, possibly introduce a new error in the error set, and
/// send a patch to Zig.
-/// The self-hosted compiler is not fully capable of handle the related code.
-/// Until then, unexpected error tracing is disabled for the self-hosted compiler.
-/// TODO remove this once self-hosted is capable enough to handle printing and
-/// stack trace dumping.
-pub const unexpected_error_tracing = builtin.zig_backend == .stage1 and builtin.mode == .Debug;
+pub const unexpected_error_tracing = (builtin.zig_backend == .stage1 or builtin.zig_backend == .stage2_llvm) and builtin.mode == .Debug;
pub const UnexpectedError = error{
/// The Operating System returned an undocumented error code.
@@ -6251,7 +6251,7 @@ pub const CopyFileRangeError = error{
NoSpaceLeft,
Unseekable,
PermissionDenied,
- FileBusy,
+ SwapFile,
} || PReadError || PWriteError || UnexpectedError;
var has_copy_file_range_syscall = std.atomic.Atomic(bool).init(true);
@@ -6305,7 +6305,7 @@ pub fn copy_file_range(fd_in: fd_t, off_in: u64, fd_out: fd_t, off_out: u64, len
.NOSPC => return error.NoSpaceLeft,
.OVERFLOW => return error.Unseekable,
.PERM => return error.PermissionDenied,
- .TXTBSY => return error.FileBusy,
+ .TXTBSY => return error.SwapFile,
// these may not be regular files, try fallback
.INVAL => {},
// support for cross-filesystem copy added in Linux 5.3, use fallback
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig
index ae9b441b60..c4dafcd3b8 100644
--- a/lib/std/os/linux.zig
+++ b/lib/std/os/linux.zig
@@ -1945,9 +1945,9 @@ pub const SIG = if (is_mips) struct {
pub const SYS = 31;
pub const UNUSED = SIG.SYS;
- pub const ERR = @intToPtr(?Sigaction.sigaction_fn, maxInt(usize));
- pub const DFL = @intToPtr(?Sigaction.sigaction_fn, 0);
- pub const IGN = @intToPtr(?Sigaction.sigaction_fn, 1);
+ pub const ERR = @intToPtr(?Sigaction.handler_fn, maxInt(usize));
+ pub const DFL = @intToPtr(?Sigaction.handler_fn, 0);
+ pub const IGN = @intToPtr(?Sigaction.handler_fn, 1);
} else if (is_sparc) struct {
pub const BLOCK = 1;
pub const UNBLOCK = 2;
@@ -1989,9 +1989,9 @@ pub const SIG = if (is_mips) struct {
pub const PWR = LOST;
pub const IO = SIG.POLL;
- pub const ERR = @intToPtr(?Sigaction.sigaction_fn, maxInt(usize));
- pub const DFL = @intToPtr(?Sigaction.sigaction_fn, 0);
- pub const IGN = @intToPtr(?Sigaction.sigaction_fn, 1);
+ pub const ERR = @intToPtr(?Sigaction.handler_fn, maxInt(usize));
+ pub const DFL = @intToPtr(?Sigaction.handler_fn, 0);
+ pub const IGN = @intToPtr(?Sigaction.handler_fn, 1);
} else struct {
pub const BLOCK = 0;
pub const UNBLOCK = 1;
@@ -2032,9 +2032,9 @@ pub const SIG = if (is_mips) struct {
pub const SYS = 31;
pub const UNUSED = SIG.SYS;
- pub const ERR = @intToPtr(?Sigaction.sigaction_fn, maxInt(usize));
- pub const DFL = @intToPtr(?Sigaction.sigaction_fn, 0);
- pub const IGN = @intToPtr(?Sigaction.sigaction_fn, 1);
+ pub const ERR = @intToPtr(?Sigaction.handler_fn, maxInt(usize));
+ pub const DFL = @intToPtr(?Sigaction.handler_fn, 0);
+ pub const IGN = @intToPtr(?Sigaction.handler_fn, 1);
};
pub const kernel_rwf = u32;
@@ -3377,7 +3377,7 @@ pub const cpu_count_t = std.meta.Int(.unsigned, std.math.log2(CPU_SETSIZE * 8));
pub fn CPU_COUNT(set: cpu_set_t) cpu_count_t {
var sum: cpu_count_t = 0;
for (set) |x| {
- sum += @popCount(usize, x);
+ sum += @popCount(x);
}
return sum;
}
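Aside, not part of the patch: the SIG.ERR/DFL/IGN retyping above pairs with the abort() change in os.zig: the sentinels are now handler_fn pointers, so they are installed through the .handler union field rather than .sigaction. A minimal sketch of installing the default disposition under the new types; the signal choice is illustrative and this only compiles for POSIX targets:

const std = @import("std");
const os = std.os;

test "install SIG.DFL through the .handler field" {
    const act = os.Sigaction{
        .handler = .{ .handler = os.SIG.DFL },
        .mask = os.empty_sigset,
        .flags = 0,
    };
    try os.sigaction(os.SIG.USR1, &act, null);
}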
diff --git a/lib/std/os/linux/bpf.zig b/lib/std/os/linux/bpf.zig
index 4cb9dbf5c5..147bf610d8 100644
--- a/lib/std/os/linux/bpf.zig
+++ b/lib/std/os/linux/bpf.zig
@@ -458,7 +458,7 @@ pub const Insn = packed struct {
else
ImmOrReg{ .imm = src };
- const src_type = switch (imm_or_reg) {
+ const src_type: u8 = switch (imm_or_reg) {
.imm => K,
.reg => X,
};
diff --git a/lib/std/os/linux/syscalls.zig b/lib/std/os/linux/syscalls.zig
index fb0993afe5..6e8cee7b84 100644
--- a/lib/std/os/linux/syscalls.zig
+++ b/lib/std/os/linux/syscalls.zig
@@ -3485,6 +3485,7 @@ pub const RiscV64 = enum(usize) {
landlock_create_ruleset = 444,
landlock_add_rule = 445,
landlock_restrict_self = 446,
+ memfd_secret = 447,
process_mrelease = 448,
futex_waitv = 449,
set_mempolicy_home_node = 450,
diff --git a/lib/std/os/plan9.zig b/lib/std/os/plan9.zig
index 99e25709b1..6d052a3573 100644
--- a/lib/std/os/plan9.zig
+++ b/lib/std/os/plan9.zig
@@ -1,7 +1,7 @@
const std = @import("../std.zig");
const builtin = @import("builtin");
-pub const syscall_bits = switch (builtin.stage2_arch) {
+pub const syscall_bits = switch (builtin.cpu.arch) {
.x86_64 => @import("plan9/x86_64.zig"),
else => @compileError("more plan9 syscall implementations (needs more inline asm in stage2"),
};
diff --git a/lib/std/os/test.zig b/lib/std/os/test.zig
index 44f8b16b9e..a8497586f9 100644
--- a/lib/std/os/test.zig
+++ b/lib/std/os/test.zig
@@ -785,7 +785,7 @@ test "sigaction" {
try testing.expect(signal_test_failed == false);
// Check if the handler has been correctly reset to SIG_DFL
try os.sigaction(os.SIG.USR1, null, &old_sa);
- try testing.expectEqual(os.SIG.DFL, old_sa.handler.sigaction);
+ try testing.expectEqual(os.SIG.DFL, old_sa.handler.handler);
}
test "dup & dup2" {
diff --git a/lib/std/os/uefi.zig b/lib/std/os/uefi.zig
index 386ad7796a..be975dddbf 100644
--- a/lib/std/os/uefi.zig
+++ b/lib/std/os/uefi.zig
@@ -55,9 +55,9 @@ pub const Guid = extern struct {
if (f.len == 0) {
const fmt = std.fmt.fmtSliceHexLower;
- const time_low = @byteSwap(u32, self.time_low);
- const time_mid = @byteSwap(u16, self.time_mid);
- const time_high_and_version = @byteSwap(u16, self.time_high_and_version);
+ const time_low = @byteSwap(self.time_low);
+ const time_mid = @byteSwap(self.time_mid);
+ const time_high_and_version = @byteSwap(self.time_high_and_version);
return std.fmt.format(writer, "{:0>8}-{:0>4}-{:0>4}-{:0>2}{:0>2}-{:0>12}", .{
fmt(std.mem.asBytes(&time_low)),
diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig
index 3e42ee5f2d..b0a0f6d407 100644
--- a/lib/std/os/windows.zig
+++ b/lib/std/os/windows.zig
@@ -517,6 +517,9 @@ pub const WriteFileError = error{
OperationAborted,
BrokenPipe,
NotOpenForWriting,
+ /// The process cannot access the file because another process has locked
+ /// a portion of the file.
+ LockViolation,
Unexpected,
};
@@ -597,6 +600,7 @@ pub fn WriteFile(
.IO_PENDING => unreachable,
.BROKEN_PIPE => return error.BrokenPipe,
.INVALID_HANDLE => return error.NotOpenForWriting,
+ .LOCK_VIOLATION => return error.LockViolation,
else => |err| return unexpectedError(err),
}
}
@@ -1798,7 +1802,7 @@ pub const PathSpace = struct {
data: [PATH_MAX_WIDE:0]u16,
len: usize,
- pub fn span(self: PathSpace) [:0]const u16 {
+ pub fn span(self: *const PathSpace) [:0]const u16 {
return self.data[0..self.len :0];
}
};
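Aside, not part of the patch: the span change above is a dangling-slice fix. Taking PathSpace by value copied the whole data array into the callee frame, so the returned slice pointed at a temporary; with *const PathSpace the slice borrows the caller's storage. A sketch of the property, with illustrative contents:

const std = @import("std");

test "span borrows from the caller's PathSpace" {
    var ps: std.os.windows.PathSpace = undefined;
    ps.data[0] = 'C';
    ps.data[1] = 0;
    ps.len = 1;
    // With span(self: *const PathSpace) the slice aliases ps.data
    // rather than a by-value copy of it.
    const s = ps.span();
    try std.testing.expectEqual(@ptrToInt(&ps.data), @ptrToInt(s.ptr));
}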
diff --git a/lib/std/packed_int_array.zig b/lib/std/packed_int_array.zig
index a07fc13af1..902e3ee19c 100644
--- a/lib/std/packed_int_array.zig
+++ b/lib/std/packed_int_array.zig
@@ -76,7 +76,7 @@ pub fn PackedIntIo(comptime Int: type, comptime endian: Endian) type {
const value_ptr = @ptrCast(*align(1) const Container, &bytes[start_byte]);
var value = value_ptr.*;
- if (endian != native_endian) value = @byteSwap(Container, value);
+ if (endian != native_endian) value = @byteSwap(value);
switch (endian) {
.Big => {
@@ -126,7 +126,7 @@ pub fn PackedIntIo(comptime Int: type, comptime endian: Endian) type {
const target_ptr = @ptrCast(*align(1) Container, &bytes[start_byte]);
var target = target_ptr.*;
- if (endian != native_endian) target = @byteSwap(Container, target);
+ if (endian != native_endian) target = @byteSwap(target);
//zero the bits we want to replace in the existing bytes
const inv_mask = @intCast(Container, std.math.maxInt(UnInt)) << keep_shift;
@@ -136,7 +136,7 @@ pub fn PackedIntIo(comptime Int: type, comptime endian: Endian) type {
//merge the new value
target |= value;
- if (endian != native_endian) target = @byteSwap(Container, target);
+ if (endian != native_endian) target = @byteSwap(target);
//save it back
target_ptr.* = target;
diff --git a/lib/std/pdb.zig b/lib/std/pdb.zig
index a44296c920..00ce2cc5ba 100644
--- a/lib/std/pdb.zig
+++ b/lib/std/pdb.zig
@@ -310,6 +310,10 @@ pub const SymbolKind = enum(u16) {
pub const TypeIndex = u32;
+// TODO According to this header:
+// https://github.com/microsoft/microsoft-pdb/blob/082c5290e5aff028ae84e43affa8be717aa7af73/include/cvinfo.h#L3722
+// we should define RecordPrefix as part of the ProcSym structure.
+// This might be important when we start generating PDB in self-hosted with our own PE linker.
pub const ProcSym = extern struct {
Parent: u32,
End: u32,
@@ -321,8 +325,7 @@ pub const ProcSym = extern struct {
CodeOffset: u32,
Segment: u16,
Flags: ProcSymFlags,
- // following is a null terminated string
- // Name: [*]u8,
+ Name: [1]u8, // null-terminated
};
pub const ProcSymFlags = packed struct {
@@ -693,7 +696,7 @@ pub const Pdb = struct {
.S_LPROC32, .S_GPROC32 => {
const proc_sym = @ptrCast(*align(1) ProcSym, &module.symbols[symbol_i + @sizeOf(RecordPrefix)]);
if (address >= proc_sym.CodeOffset and address < proc_sym.CodeOffset + proc_sym.CodeSize) {
- return mem.sliceTo(@ptrCast([*:0]u8, proc_sym) + @sizeOf(ProcSym), 0);
+ return mem.sliceTo(@ptrCast([*:0]u8, &proc_sym.Name[0]), 0);
}
},
else => {},
diff --git a/lib/std/priority_dequeue.zig b/lib/std/priority_dequeue.zig
index 228c6d62ae..ae403c1d03 100644
--- a/lib/std/priority_dequeue.zig
+++ b/lib/std/priority_dequeue.zig
@@ -69,7 +69,7 @@ pub fn PriorityDequeue(comptime T: type, comptime Context: type, comptime compar
// The first element is on a min layer;
// next two are on a max layer;
// next four are on a min layer, and so on.
- const leading_zeros = @clz(usize, index + 1);
+ const leading_zeros = @clz(index + 1);
const highest_set_bit = @bitSizeOf(usize) - 1 - leading_zeros;
return (highest_set_bit & 1) == 0;
}
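Aside, not part of the patch: the min/max layer test works because layers alternate with node depth, and the depth of index i is the position of the highest set bit of i + 1. A sketch with the private helper reimplemented for illustration:

const std = @import("std");

// Reimplementation of the private layer check, for illustration only.
fn isMinLayer(index: usize) bool {
    const leading_zeros = @clz(index + 1);
    const highest_set_bit = @bitSizeOf(usize) - 1 - leading_zeros;
    return (highest_set_bit & 1) == 0;
}

test "layers alternate with depth" {
    try std.testing.expect(isMinLayer(0)); // root: min layer
    try std.testing.expect(!isMinLayer(1)); // depth 1: max layer
    try std.testing.expect(!isMinLayer(2));
    try std.testing.expect(isMinLayer(3)); // depth 2: min layer
}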
diff --git a/lib/std/rand.zig b/lib/std/rand.zig
index a121a3f5ba..c13d2895c9 100644
--- a/lib/std/rand.zig
+++ b/lib/std/rand.zig
@@ -257,15 +257,15 @@ pub const Random = struct {
// If all 41 bits are zero, generate additional random bits, until a
// set bit is found, or 126 bits have been generated.
const rand = r.int(u64);
- var rand_lz = @clz(u64, rand);
+ var rand_lz = @clz(rand);
if (rand_lz >= 41) {
// TODO: when #5177 or #489 is implemented,
// tell the compiler it is unlikely (1/2^41) to reach this point.
// (Same for the if branch and the f64 calculations below.)
- rand_lz = 41 + @clz(u64, r.int(u64));
+ rand_lz = 41 + @clz(r.int(u64));
if (rand_lz == 41 + 64) {
// It is astronomically unlikely to reach this point.
- rand_lz += @clz(u32, r.int(u32) | 0x7FF);
+ rand_lz += @clz(r.int(u32) | 0x7FF);
}
}
const mantissa = @truncate(u23, rand);
@@ -277,12 +277,12 @@ pub const Random = struct {
// If all 12 bits are zero, generate additional random bits, until a
// set bit is found, or 1022 bits have been generated.
const rand = r.int(u64);
- var rand_lz: u64 = @clz(u64, rand);
+ var rand_lz: u64 = @clz(rand);
if (rand_lz >= 12) {
rand_lz = 12;
while (true) {
// It is astronomically unlikely for this loop to execute more than once.
- const addl_rand_lz = @clz(u64, r.int(u64));
+ const addl_rand_lz = @clz(r.int(u64));
rand_lz += addl_rand_lz;
if (addl_rand_lz != 64) {
break;
@@ -337,6 +337,42 @@ pub const Random = struct {
mem.swap(T, &buf[i], &buf[j]);
}
}
+
+ /// Randomly selects an index into `proportions`, where the likelihood of each
+ /// index is weighted by that proportion.
+ ///
+ /// This is useful for selecting an item from a slice where weights are not equal.
+ /// `T` must be a numeric type capable of holding the sum of `proportions`.
+ pub fn weightedIndex(r: std.rand.Random, comptime T: type, proportions: []T) usize {
+ // This implementation works by summing the proportions and picking a random
+ // point in [0, sum). We then loop over the proportions, accumulating
+ // until our accumulator is greater than the random point.
+
+ var sum: T = 0;
+ for (proportions) |v| {
+ sum += v;
+ }
+
+ const point = if (comptime std.meta.trait.isSignedInt(T))
+ r.intRangeLessThan(T, 0, sum)
+ else if (comptime std.meta.trait.isUnsignedInt(T))
+ r.uintLessThan(T, sum)
+ else if (comptime std.meta.trait.isFloat(T))
+ // take care that imprecision doesn't lead to a value slightly greater than sum
+ std.math.min(r.float(T) * sum, sum - std.math.epsilon(T))
+ else
+ @compileError("weightedIndex does not support proportions of type " ++ @typeName(T));
+
+ std.debug.assert(point < sum);
+
+ var accumulator: T = 0;
+ for (proportions) |p, index| {
+ accumulator += p;
+ if (point < accumulator) return index;
+ }
+
+ unreachable;
+ }
};
/// Convert a random integer 0 <= random_int <= maxValue(T),
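Aside, not part of the patch: a minimal usage sketch for weightedIndex; the weights are illustrative:

const std = @import("std");

test "weightedIndex picks indices in proportion to weights" {
    var prng = std.rand.DefaultPrng.init(42);
    const random = prng.random();
    // Index 0 should come up roughly three times as often as index 1.
    var weights = [_]u32{ 3, 1 };
    const idx = random.weightedIndex(u32, &weights);
    try std.testing.expect(idx < weights.len);
}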
diff --git a/lib/std/rand/test.zig b/lib/std/rand/test.zig
index 7c2016901f..cae77d6e37 100644
--- a/lib/std/rand/test.zig
+++ b/lib/std/rand/test.zig
@@ -445,3 +445,29 @@ test "CSPRNG" {
const c = random.int(u64);
try expect(a ^ b ^ c != 0);
}
+
+test "Random weightedIndex" {
+ // Make sure weightedIndex works for various integers and floats
+ inline for (.{ u64, i4, f32, f64 }) |T| {
+ var prng = DefaultPrng.init(0);
+ const random = prng.random();
+
+ var proportions = [_]T{ 2, 1, 1, 2 };
+ var counts = [_]f64{ 0, 0, 0, 0 };
+
+ const n_trials: u64 = 10_000;
+ var i: usize = 0;
+ while (i < n_trials) : (i += 1) {
+ const pick = random.weightedIndex(T, &proportions);
+ counts[pick] += 1;
+ }
+
+ // We expect the first and last counts to be roughly 2x the second and third
+ const approxEqRel = std.math.approxEqRel;
+ // Define "roughly" to be within 10%
+ const tolerance = 0.1;
+ try std.testing.expect(approxEqRel(f64, counts[0], counts[1] * 2, tolerance));
+ try std.testing.expect(approxEqRel(f64, counts[1], counts[2], tolerance));
+ try std.testing.expect(approxEqRel(f64, counts[2] * 2, counts[3], tolerance));
+ }
+}
diff --git a/lib/std/start.zig b/lib/std/start.zig
index 8ea261be2d..e6a7b7991a 100644
--- a/lib/std/start.zig
+++ b/lib/std/start.zig
@@ -131,7 +131,7 @@ fn wWinMainCRTStartup2() callconv(.C) noreturn {
fn exit2(code: usize) noreturn {
switch (native_os) {
- .linux => switch (builtin.stage2_arch) {
+ .linux => switch (builtin.cpu.arch) {
.x86_64 => {
asm volatile ("syscall"
:
@@ -175,7 +175,7 @@ fn exit2(code: usize) noreturn {
else => @compileError("TODO"),
},
// exits(0)
- .plan9 => switch (builtin.stage2_arch) {
+ .plan9 => switch (builtin.cpu.arch) {
.x86_64 => {
asm volatile (
\\push $0
diff --git a/lib/std/target.zig b/lib/std/target.zig
index 428ae57e76..b6a8a8b9c0 100644
--- a/lib/std/target.zig
+++ b/lib/std/target.zig
@@ -9,6 +9,7 @@ pub const Target = struct {
cpu: Cpu,
os: Os,
abi: Abi,
+ ofmt: ObjectFormat,
pub const Os = struct {
tag: Tag,
@@ -624,6 +625,20 @@ pub const Target = struct {
.dxcontainer => @panic("TODO what's the extension for these?"),
};
}
+
+ pub fn default(os_tag: Os.Tag, cpu_arch: Cpu.Arch) ObjectFormat {
+ return switch (os_tag) {
+ .windows, .uefi => .coff,
+ .ios, .macos, .watchos, .tvos => .macho,
+ .plan9 => .plan9,
+ else => return switch (cpu_arch) {
+ .wasm32, .wasm64 => .wasm,
+ .spirv32, .spirv64 => .spirv,
+ .nvptx, .nvptx64 => .nvptx,
+ else => .elf,
+ },
+ };
+ }
};
pub const SubSystem = enum {
@@ -1426,24 +1441,6 @@ pub const Target = struct {
return libPrefix_os_abi(self.os.tag, self.abi);
}
- pub fn getObjectFormatSimple(os_tag: Os.Tag, cpu_arch: Cpu.Arch) ObjectFormat {
- return switch (os_tag) {
- .windows, .uefi => .coff,
- .ios, .macos, .watchos, .tvos => .macho,
- .plan9 => .plan9,
- else => return switch (cpu_arch) {
- .wasm32, .wasm64 => .wasm,
- .spirv32, .spirv64 => .spirv,
- .nvptx, .nvptx64 => .nvptx,
- else => .elf,
- },
- };
- }
-
- pub fn getObjectFormat(self: Target) ObjectFormat {
- return getObjectFormatSimple(self.os.tag, self.cpu.arch);
- }
-
pub fn isMinGW(self: Target) bool {
return self.os.tag == .windows and self.isGnu();
}
@@ -1801,10 +1798,11 @@ pub const Target = struct {
else => false,
},
f64 => switch (target.cpu.arch) {
+ .aarch64 => target.isDarwin(),
+
.x86_64,
.i386,
.riscv64,
- .aarch64,
.aarch64_be,
.aarch64_32,
.s390x,
@@ -1856,24 +1854,28 @@ pub const Target = struct {
else => 4,
},
- // For x86_64, LLVMABIAlignmentOfType(i128) reports 8. However I think 16
- // is a better number for two reasons:
- // 1. Better machine code when loading into SIMD register.
+ // For these, LLVMABIAlignmentOfType(i128) reports 8. Note that 16
+ // is a relevant number in three cases:
+ // 1. A different machine code instruction is used when loading into a SIMD register.
// 2. The C ABI wants 16 for extern structs.
// 3. 16-byte cmpxchg needs 16-byte alignment.
- // Same logic for riscv64, powerpc64, mips64, sparc64.
+ // Same logic for powerpc64, mips64, sparc64.
.x86_64,
- .riscv64,
.powerpc64,
.powerpc64le,
.mips64,
.mips64el,
.sparc64,
+ => return switch (target.ofmt) {
+ .c => 16,
+ else => 8,
+ },
// Even LLVMABIAlignmentOfType(i128) agrees on these targets.
.aarch64,
.aarch64_be,
.aarch64_32,
+ .riscv64,
.bpfel,
.bpfeb,
.nvptx,
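Aside, not part of the patch: with ofmt now a Target field, the getObjectFormatSimple/getObjectFormat pair removed in the hunk above is replaced by Target.ObjectFormat.default. A sketch of the mapping:

const std = @import("std");

test "ObjectFormat.default picks the conventional format" {
    const ObjectFormat = std.Target.ObjectFormat;
    try std.testing.expectEqual(ObjectFormat.coff, ObjectFormat.default(.windows, .x86_64));
    try std.testing.expectEqual(ObjectFormat.macho, ObjectFormat.default(.macos, .aarch64));
    try std.testing.expectEqual(ObjectFormat.wasm, ObjectFormat.default(.freestanding, .wasm32));
    try std.testing.expectEqual(ObjectFormat.elf, ObjectFormat.default(.linux, .x86_64));
}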
diff --git a/lib/std/valgrind/callgrind.zig b/lib/std/valgrind/callgrind.zig
index 6c7dadf1e1..fd6967bb96 100644
--- a/lib/std/valgrind/callgrind.zig
+++ b/lib/std/valgrind/callgrind.zig
@@ -2,7 +2,7 @@ const std = @import("../std.zig");
const valgrind = std.valgrind;
pub const CallgrindClientRequest = enum(usize) {
- DumpStats = valgrind.ToolBase("CT"),
+ DumpStats = valgrind.ToolBase("CT".*),
ZeroStats,
ToggleCollect,
DumpStatsAt,
diff --git a/lib/std/zig.zig b/lib/std/zig.zig
index 4f9f0c6b0e..1d221b9abd 100644
--- a/lib/std/zig.zig
+++ b/lib/std/zig.zig
@@ -103,7 +103,6 @@ pub const BinNameOptions = struct {
target: std.Target,
output_mode: std.builtin.OutputMode,
link_mode: ?std.builtin.LinkMode = null,
- object_format: ?std.Target.ObjectFormat = null,
version: ?std.builtin.Version = null,
};
@@ -111,8 +110,7 @@ pub const BinNameOptions = struct {
pub fn binNameAlloc(allocator: std.mem.Allocator, options: BinNameOptions) error{OutOfMemory}![]u8 {
const root_name = options.root_name;
const target = options.target;
- const ofmt = options.object_format orelse target.getObjectFormat();
- switch (ofmt) {
+ switch (target.ofmt) {
.coff => switch (options.output_mode) {
.Exe => return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, target.exeFileExt() }),
.Lib => {
@@ -186,8 +184,12 @@ pub fn binNameAlloc(allocator: std.mem.Allocator, options: BinNameOptions) error
.raw => return std.fmt.allocPrint(allocator, "{s}.bin", .{root_name}),
.plan9 => switch (options.output_mode) {
.Exe => return allocator.dupe(u8, root_name),
- .Obj => return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, ofmt.fileExt(target.cpu.arch) }),
- .Lib => return std.fmt.allocPrint(allocator, "{s}{s}.a", .{ target.libPrefix(), root_name }),
+ .Obj => return std.fmt.allocPrint(allocator, "{s}{s}", .{
+ root_name, target.ofmt.fileExt(target.cpu.arch),
+ }),
+ .Lib => return std.fmt.allocPrint(allocator, "{s}{s}.a", .{
+ target.libPrefix(), root_name,
+ }),
},
.nvptx => return std.fmt.allocPrint(allocator, "{s}", .{root_name}),
.dxcontainer => @panic("TODO what's the file extension for these?"),
diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig
index 9bffcb3df2..016cefb255 100644
--- a/lib/std/zig/Ast.zig
+++ b/lib/std/zig/Ast.zig
@@ -2967,7 +2967,7 @@ pub const Node = struct {
/// Same as ContainerDeclTwo except there is known to be a trailing comma
/// or semicolon before the rbrace.
container_decl_two_trailing,
- /// `union(lhs)` / `enum(lhs)`. `SubRange[rhs]`.
+ /// `struct(lhs)` / `union(lhs)` / `enum(lhs)`. `SubRange[rhs]`.
container_decl_arg,
/// Same as container_decl_arg but there is known to be a trailing
/// comma or semicolon before the rbrace.
diff --git a/lib/std/zig/CrossTarget.zig b/lib/std/zig/CrossTarget.zig
index 79050e161a..6c59a4a3a1 100644
--- a/lib/std/zig/CrossTarget.zig
+++ b/lib/std/zig/CrossTarget.zig
@@ -42,6 +42,9 @@ abi: ?Target.Abi = null,
/// based on the `os_tag`.
dynamic_linker: DynamicLinker = DynamicLinker{},
+/// `null` means default for the cpu/arch/os combo.
+ofmt: ?Target.ObjectFormat = null,
+
pub const CpuModel = union(enum) {
/// Always native
native,
@@ -171,6 +174,7 @@ pub fn toTarget(self: CrossTarget) Target {
.cpu = self.getCpu(),
.os = self.getOs(),
.abi = self.getAbi(),
+ .ofmt = self.getObjectFormat(),
};
}
@@ -200,6 +204,8 @@ pub const ParseOptions = struct {
/// detected path, or a standard path.
dynamic_linker: ?[]const u8 = null,
+ object_format: ?[]const u8 = null,
+
/// If this is provided, the function will populate some information about parsing failures,
/// so that user-friendly error messages can be delivered.
diagnostics: ?*Diagnostics = null,
@@ -324,6 +330,11 @@ pub fn parse(args: ParseOptions) !CrossTarget {
}
}
+ if (args.object_format) |ofmt_name| {
+ result.ofmt = std.meta.stringToEnum(Target.ObjectFormat, ofmt_name) orelse
+ return error.UnknownObjectFormat;
+ }
+
return result;
}
@@ -623,7 +634,7 @@ pub fn setGnuLibCVersion(self: *CrossTarget, major: u32, minor: u32, patch: u32)
}
pub fn getObjectFormat(self: CrossTarget) Target.ObjectFormat {
- return Target.getObjectFormatSimple(self.getOsTag(), self.getCpuArch());
+ return self.ofmt orelse Target.ObjectFormat.default(self.getOsTag(), self.getCpuArch());
}
pub fn updateCpuFeatures(self: CrossTarget, set: *Target.Cpu.Feature.Set) void {
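Aside, not part of the patch: the new ParseOptions.object_format string feeds the ofmt field added above. A sketch of selecting ELF explicitly, with an illustrative triple:

const std = @import("std");

test "parse a target with an explicit object format" {
    const cross = try std.zig.CrossTarget.parse(.{
        .arch_os_abi = "x86_64-linux-gnu",
        .object_format = "elf",
    });
    try std.testing.expectEqual(std.Target.ObjectFormat.elf, cross.getObjectFormat());
}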
diff --git a/lib/std/zig/c_builtins.zig b/lib/std/zig/c_builtins.zig
index 8f2eebccd4..08a2ec78c4 100644
--- a/lib/std/zig/c_builtins.zig
+++ b/lib/std/zig/c_builtins.zig
@@ -1,13 +1,13 @@
const std = @import("std");
pub inline fn __builtin_bswap16(val: u16) u16 {
- return @byteSwap(u16, val);
+ return @byteSwap(val);
}
pub inline fn __builtin_bswap32(val: u32) u32 {
- return @byteSwap(u32, val);
+ return @byteSwap(val);
}
pub inline fn __builtin_bswap64(val: u64) u64 {
- return @byteSwap(u64, val);
+ return @byteSwap(val);
}
pub inline fn __builtin_signbit(val: f64) c_int {
@@ -20,19 +20,19 @@ pub inline fn __builtin_signbitf(val: f32) c_int {
pub inline fn __builtin_popcount(val: c_uint) c_int {
// popcount of a c_uint will never exceed the capacity of a c_int
@setRuntimeSafety(false);
- return @bitCast(c_int, @as(c_uint, @popCount(c_uint, val)));
+ return @bitCast(c_int, @as(c_uint, @popCount(val)));
}
pub inline fn __builtin_ctz(val: c_uint) c_int {
// Returns the number of trailing 0-bits in val, starting at the least significant bit position.
// In C if `val` is 0, the result is undefined; in zig it's the number of bits in a c_uint
@setRuntimeSafety(false);
- return @bitCast(c_int, @as(c_uint, @ctz(c_uint, val)));
+ return @bitCast(c_int, @as(c_uint, @ctz(val)));
}
pub inline fn __builtin_clz(val: c_uint) c_int {
// Returns the number of leading 0-bits in x, starting at the most significant bit position.
// In C if `val` is 0, the result is undefined; in zig it's the number of bits in a c_uint
@setRuntimeSafety(false);
- return @bitCast(c_int, @as(c_uint, @clz(c_uint, val)));
+ return @bitCast(c_int, @as(c_uint, @clz(val)));
}
pub inline fn __builtin_sqrt(val: f64) f64 {
diff --git a/lib/std/zig/c_translation.zig b/lib/std/zig/c_translation.zig
index 8a2086e9ad..348e3a7133 100644
--- a/lib/std/zig/c_translation.zig
+++ b/lib/std/zig/c_translation.zig
@@ -349,7 +349,7 @@ test "shuffleVectorIndex" {
/// Constructs a [*c] pointer with the const and volatile annotations
/// from SelfType for pointing to a C flexible array of ElementType.
-pub fn FlexibleArrayType(comptime SelfType: type, ElementType: type) type {
+pub fn FlexibleArrayType(comptime SelfType: type, comptime ElementType: type) type {
switch (@typeInfo(SelfType)) {
.Pointer => |ptr| {
return @Type(.{ .Pointer = .{
diff --git a/lib/std/zig/parse.zig b/lib/std/zig/parse.zig
index 2a7d2623ef..fda6ad98b9 100644
--- a/lib/std/zig/parse.zig
+++ b/lib/std/zig/parse.zig
@@ -3356,16 +3356,18 @@ const Parser = struct {
}
/// Caller must have already verified the first token.
+ /// ContainerDeclAuto <- ContainerDeclType LBRACE container_doc_comment? ContainerMembers RBRACE
+ ///
/// ContainerDeclType
- /// <- KEYWORD_struct
+ /// <- KEYWORD_struct (LPAREN Expr RPAREN)?
+ /// / KEYWORD_opaque
/// / KEYWORD_enum (LPAREN Expr RPAREN)?
/// / KEYWORD_union (LPAREN (KEYWORD_enum (LPAREN Expr RPAREN)? / Expr) RPAREN)?
- /// / KEYWORD_opaque
fn parseContainerDeclAuto(p: *Parser) !Node.Index {
const main_token = p.nextToken();
const arg_expr = switch (p.token_tags[main_token]) {
- .keyword_struct, .keyword_opaque => null_node,
- .keyword_enum => blk: {
+ .keyword_opaque => null_node,
+ .keyword_struct, .keyword_enum => blk: {
if (p.eatToken(.l_paren)) |_| {
const expr = try p.expectExpr();
_ = try p.expectToken(.r_paren);
@@ -3668,7 +3670,7 @@ const Parser = struct {
}
/// KEYWORD_if LPAREN Expr RPAREN PtrPayload? Body (KEYWORD_else Payload? Body)?
- fn parseIf(p: *Parser, bodyParseFn: fn (p: *Parser) Error!Node.Index) !Node.Index {
+ fn parseIf(p: *Parser, comptime bodyParseFn: fn (p: *Parser) Error!Node.Index) !Node.Index {
const if_token = p.eatToken(.keyword_if) orelse return null_node;
_ = try p.expectToken(.l_paren);
const condition = try p.expectExpr();
diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig
index a74d53f21c..2bb8c848bc 100644
--- a/lib/std/zig/parser_test.zig
+++ b/lib/std/zig/parser_test.zig
@@ -3064,6 +3064,13 @@ test "zig fmt: struct declaration" {
\\ c: u8,
\\};
\\
+ \\const Ps = packed struct(u32) {
+ \\ a: u1,
+ \\ b: u2,
+ \\
+ \\ c: u29,
+ \\};
+ \\
\\const Es = extern struct {
\\ a: u8,
\\ b: u8,
@@ -4247,10 +4254,10 @@ test "zig fmt: integer literals with underscore separators" {
\\const
\\ x =
\\ 1_234_567
- \\ + (0b0_1-0o7_0+0xff_FF ) + 0_0;
+ \\ + (0b0_1-0o7_0+0xff_FF ) + 1_0;
,
\\const x =
- \\ 1_234_567 + (0b0_1 - 0o7_0 + 0xff_FF) + 0_0;
+ \\ 1_234_567 + (0b0_1 - 0o7_0 + 0xff_FF) + 1_0;
\\
);
}
@@ -4259,7 +4266,7 @@ test "zig fmt: hex literals with underscore separators" {
try testTransform(
\\pub fn orMask(a: [ 1_000 ]u64, b: [ 1_000] u64) [1_000]u64 {
\\ var c: [1_000]u64 = [1]u64{ 0xFFFF_FFFF_FFFF_FFFF}**1_000;
- \\ for (c [ 0_0 .. ]) |_, i| {
+ \\ for (c [ 1_0 .. ]) |_, i| {
\\ c[i] = (a[i] | b[i]) & 0xCCAA_CCAA_CCAA_CCAA;
\\ }
\\ return c;
@@ -4269,7 +4276,7 @@ test "zig fmt: hex literals with underscore separators" {
,
\\pub fn orMask(a: [1_000]u64, b: [1_000]u64) [1_000]u64 {
\\ var c: [1_000]u64 = [1]u64{0xFFFF_FFFF_FFFF_FFFF} ** 1_000;
- \\ for (c[0_0..]) |_, i| {
+ \\ for (c[1_0..]) |_, i| {
\\ c[i] = (a[i] | b[i]) & 0xCCAA_CCAA_CCAA_CCAA;
\\ }
\\ return c;
@@ -4281,14 +4288,14 @@ test "zig fmt: hex literals with underscore separators" {
test "zig fmt: decimal float literals with underscore separators" {
try testTransform(
\\pub fn main() void {
- \\ const a:f64=(10.0e-0+(10.0e+0))+10_00.00_00e-2+00_00.00_10e+4;
- \\ const b:f64=010.0--0_10.0+0_1_0.0_0+1e2;
+ \\ const a:f64=(10.0e-0+(10.0e+0))+10_00.00_00e-2+20_00.00_10e+4;
+ \\ const b:f64=1_0.0--10_10.0+1_0_0.0_0+1e2;
\\ std.debug.warn("a: {}, b: {} -> a+b: {}\n", .{ a, b, a + b });
\\}
,
\\pub fn main() void {
- \\ const a: f64 = (10.0e-0 + (10.0e+0)) + 10_00.00_00e-2 + 00_00.00_10e+4;
- \\ const b: f64 = 010.0 - -0_10.0 + 0_1_0.0_0 + 1e2;
+ \\ const a: f64 = (10.0e-0 + (10.0e+0)) + 10_00.00_00e-2 + 20_00.00_10e+4;
+ \\ const b: f64 = 1_0.0 - -10_10.0 + 1_0_0.0_0 + 1e2;
\\ std.debug.warn("a: {}, b: {} -> a+b: {}\n", .{ a, b, a + b });
\\}
\\
diff --git a/lib/std/zig/system/NativePaths.zig b/lib/std/zig/system/NativePaths.zig
index e9e7460314..5f52b04ce4 100644
--- a/lib/std/zig/system/NativePaths.zig
+++ b/lib/std/zig/system/NativePaths.zig
@@ -109,6 +109,8 @@ pub fn detect(allocator: Allocator, native_info: NativeTargetInfo) !NativePaths
if (native_target.os.tag != .windows) {
const triple = try native_target.linuxTriple(allocator);
+ defer allocator.free(triple);
+
const qual = native_target.cpu.arch.ptrBitWidth();
// TODO: $ ld --verbose | grep SEARCH_DIR
diff --git a/lib/std/zig/system/NativeTargetInfo.zig b/lib/std/zig/system/NativeTargetInfo.zig
index 002ad49d64..824a1a26b6 100644
--- a/lib/std/zig/system/NativeTargetInfo.zig
+++ b/lib/std/zig/system/NativeTargetInfo.zig
@@ -237,7 +237,7 @@ pub fn detect(allocator: Allocator, cross_target: CrossTarget) DetectError!Nativ
/// First we attempt to use the executable's own binary. If it is dynamically
/// linked, then it should answer both the C ABI question and the dynamic linker question.
-/// If it is statically linked, then we try /usr/bin/env. If that does not provide the answer, then
+/// If it is statically linked, then we try /usr/bin/env (or the file referenced by its shebang line). If that does not provide the answer, then
/// we fall back to the defaults.
/// TODO Remove the Allocator requirement from this function.
fn detectAbiAndDynamicLinker(
@@ -276,6 +276,7 @@ fn detectAbiAndDynamicLinker(
};
var ld_info_list_buffer: [all_abis.len]LdInfo = undefined;
var ld_info_list_len: usize = 0;
+ const ofmt = cross_target.ofmt orelse Target.ObjectFormat.default(os.tag, cpu.arch);
for (all_abis) |abi| {
// This may be a nonsensical parameter. We detect this with error.UnknownDynamicLinkerPath and
@@ -284,6 +285,7 @@ fn detectAbiAndDynamicLinker(
.cpu = cpu,
.os = os,
.abi = abi,
+ .ofmt = ofmt,
};
const ld = target.standardDynamicLinkerPath();
if (ld.get() == null) continue;
@@ -346,6 +348,7 @@ fn detectAbiAndDynamicLinker(
.cpu = cpu,
.os = os_adjusted,
.abi = cross_target.abi orelse found_ld_info.abi,
+ .ofmt = cross_target.ofmt orelse Target.ObjectFormat.default(os_adjusted.tag, cpu.arch),
},
.dynamic_linker = if (cross_target.dynamic_linker.get() == null)
DynamicLinker.init(found_ld_path)
@@ -355,37 +358,77 @@ fn detectAbiAndDynamicLinker(
return result;
}
- const env_file = std.fs.openFileAbsoluteZ("/usr/bin/env", .{}) catch |err| switch (err) {
- error.NoSpaceLeft => unreachable,
- error.NameTooLong => unreachable,
- error.PathAlreadyExists => unreachable,
- error.SharingViolation => unreachable,
- error.InvalidUtf8 => unreachable,
- error.BadPathName => unreachable,
- error.PipeBusy => unreachable,
- error.FileLocksNotSupported => unreachable,
- error.WouldBlock => unreachable,
- error.FileBusy => unreachable, // opened without write permissions
+ const elf_file = blk: {
+ // This block looks for a shebang line in /usr/bin/env.
+ // If it finds one, it uses the file referenced by the shebang as the ELF file to examine instead of /usr/bin/env,
+ // applying the same logic recursively in case that file starts with a shebang line too.
- error.IsDir,
- error.NotDir,
- error.InvalidHandle,
- error.AccessDenied,
- error.NoDevice,
- error.FileNotFound,
- error.FileTooBig,
- error.Unexpected,
- => return defaultAbiAndDynamicLinker(cpu, os, cross_target),
+ // Since /usr/bin/env is hard-coded into the shebang line of many portable scripts, it's a
+ // reasonably reliable path to start with.
+ var file_name: []const u8 = "/usr/bin/env";
+ // #! (2) + 255 (max length of shebang line since Linux 5.1) + \n (1)
+ var buffer: [258]u8 = undefined;
+ while (true) {
+ const file = std.fs.openFileAbsolute(file_name, .{}) catch |err| switch (err) {
+ error.NoSpaceLeft => unreachable,
+ error.NameTooLong => unreachable,
+ error.PathAlreadyExists => unreachable,
+ error.SharingViolation => unreachable,
+ error.InvalidUtf8 => unreachable,
+ error.BadPathName => unreachable,
+ error.PipeBusy => unreachable,
+ error.FileLocksNotSupported => unreachable,
+ error.WouldBlock => unreachable,
+ error.FileBusy => unreachable, // opened without write permissions
- else => |e| return e,
+ error.IsDir,
+ error.NotDir,
+ error.InvalidHandle,
+ error.AccessDenied,
+ error.NoDevice,
+ error.FileNotFound,
+ error.FileTooBig,
+ error.Unexpected,
+ => |e| {
+ std.log.warn("Encoutered error: {s}, falling back to default ABI and dynamic linker.\n", .{@errorName(e)});
+ return defaultAbiAndDynamicLinker(cpu, os, cross_target);
+ },
+
+ else => |e| return e,
+ };
+
+ const line = file.reader().readUntilDelimiter(&buffer, '\n') catch |err| switch (err) {
+ error.IsDir => unreachable, // Handled before
+ error.AccessDenied => unreachable,
+ error.WouldBlock => unreachable, // Did not request blocking mode
+ error.OperationAborted => unreachable, // Windows-only
+ error.BrokenPipe => unreachable,
+ error.ConnectionResetByPeer => unreachable,
+ error.ConnectionTimedOut => unreachable,
+ error.InputOutput => unreachable,
+ error.Unexpected => unreachable,
+
+ error.StreamTooLong,
+ error.EndOfStream,
+ error.NotOpenForReading,
+ => break :blk file,
+
+ else => |e| {
+ file.close();
+ return e;
+ },
+ };
+ if (!mem.startsWith(u8, line, "#!")) break :blk file;
+ var it = std.mem.tokenize(u8, line[2..], " ");
+ file.close();
+ file_name = it.next() orelse return defaultAbiAndDynamicLinker(cpu, os, cross_target);
+ }
};
- defer env_file.close();
+ defer elf_file.close();
// If Zig is statically linked, such as via distributed binary static builds, the above
- // trick won't work. The next thing we fall back to is the same thing, but for /usr/bin/env.
- // Since that path is hard-coded into the shebang line of many portable scripts, it's a
- // reasonably reliable path to check for.
- return abiAndDynamicLinkerFromFile(env_file, cpu, os, ld_info_list, cross_target) catch |err| switch (err) {
+ // trick (the self_exe block above) won't work. The next thing we fall back to is the same approach, but for elf_file.
+ return abiAndDynamicLinkerFromFile(elf_file, cpu, os, ld_info_list, cross_target) catch |err| switch (err) {
error.FileSystem,
error.SystemResources,
error.SymLinkLoop,
@@ -403,7 +446,10 @@ fn detectAbiAndDynamicLinker(
error.UnexpectedEndOfFile,
error.NameTooLong,
// Finally, we fall back on the standard path.
- => defaultAbiAndDynamicLinker(cpu, os, cross_target),
+ => |e| {
+ std.log.warn("Encoutered error: {s}, falling back to default ABI and dynamic linker.\n", .{@errorName(e)});
+ return defaultAbiAndDynamicLinker(cpu, os, cross_target);
+ },
};
}
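A minimal standalone sketch of the shebang-chasing loop introduced above (same std calls as the diff; hypothetical helper name, error handling collapsed for brevity):

    const std = @import("std");

    /// Follow "#!" lines starting from /usr/bin/env until a non-script
    /// (presumably ELF) file is reached, mirroring the loop above.
    fn openElfViaShebang() !std.fs.File {
        var file_name: []const u8 = "/usr/bin/env";
        // #! (2) + 255 (max shebang line length since Linux 5.1) + \n (1)
        var buffer: [258]u8 = undefined;
        while (true) {
            const file = try std.fs.openFileAbsolute(file_name, .{});
            const line = file.reader().readUntilDelimiter(&buffer, '\n') catch {
                return file; // no newline found; treat it as the ELF candidate
            };
            if (!std.mem.startsWith(u8, line, "#!")) return file;
            var it = std.mem.tokenize(u8, line[2..], " ");
            file.close();
            file_name = it.next() orelse return error.FileNotFound;
        }
    }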
@@ -496,6 +542,7 @@ pub fn abiAndDynamicLinkerFromFile(
.cpu = cpu,
.os = os,
.abi = cross_target.abi orelse Target.Abi.default(cpu.arch, os),
+ .ofmt = cross_target.ofmt orelse Target.ObjectFormat.default(os.tag, cpu.arch),
},
.dynamic_linker = cross_target.dynamic_linker,
};
@@ -786,6 +833,7 @@ fn defaultAbiAndDynamicLinker(cpu: Target.Cpu, os: Target.Os, cross_target: Cros
.cpu = cpu,
.os = os,
.abi = cross_target.abi orelse Target.Abi.default(cpu.arch, os),
+ .ofmt = cross_target.ofmt orelse Target.ObjectFormat.default(os.tag, cpu.arch),
};
return NativeTargetInfo{
.target = target,
@@ -804,13 +852,13 @@ pub const LdInfo = struct {
pub fn elfInt(is_64: bool, need_bswap: bool, int_32: anytype, int_64: anytype) @TypeOf(int_64) {
if (is_64) {
if (need_bswap) {
- return @byteSwap(@TypeOf(int_64), int_64);
+ return @byteSwap(int_64);
} else {
return int_64;
}
} else {
if (need_bswap) {
- return @byteSwap(@TypeOf(int_32), int_32);
+ return @byteSwap(int_32);
} else {
return int_32;
}
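The `@byteSwap` change reflects the builtin losing its explicit type parameter; the operand type is now inferred. A quick sketch of the new call form:

    const std = @import("std");

    test "byteSwap with inferred operand type" {
        const x: u32 = 0x11223344;
        // previously: @byteSwap(u32, x)
        try std.testing.expectEqual(@as(u32, 0x44332211), @byteSwap(x));
    }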
diff --git a/lib/std/zig/tokenizer.zig b/lib/std/zig/tokenizer.zig
index 8a0885ab7b..eaa0ddd716 100644
--- a/lib/std/zig/tokenizer.zig
+++ b/lib/std/zig/tokenizer.zig
@@ -1,5 +1,4 @@
const std = @import("../std.zig");
-const mem = std.mem;
pub const Token = struct {
tag: Tag,
@@ -350,7 +349,7 @@ pub const Tokenizer = struct {
pub fn init(buffer: [:0]const u8) Tokenizer {
// Skip the UTF-8 BOM if present
- const src_start = if (mem.startsWith(u8, buffer, "\xEF\xBB\xBF")) 3 else @as(usize, 0);
+ const src_start: usize = if (std.mem.startsWith(u8, buffer, "\xEF\xBB\xBF")) 3 else 0;
return Tokenizer{
.buffer = buffer,
.index = src_start,
@@ -797,6 +796,10 @@ pub const Tokenizer = struct {
remaining_code_units = 3;
state = .char_literal_unicode;
},
+ '\n' => {
+ result.tag = .invalid;
+ break;
+ },
else => {
state = .char_literal_end;
},
@@ -1429,8 +1432,8 @@ pub const Tokenizer = struct {
fn getInvalidCharacterLength(self: *Tokenizer) u3 {
const c0 = self.buffer[self.index];
- if (c0 < 0x80) {
- if (c0 < 0x20 or c0 == 0x7f) {
+ if (std.ascii.isASCII(c0)) {
+ if (std.ascii.isCntrl(c0)) {
// ascii control codes are never allowed
// (note that \n was checked before we got here)
return 1;
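The rewritten checks lean on `std.ascii`. A small sanity test (a sketch, not part of the diff) confirming the helpers match the old manual ranges over the ASCII range this code actually handles:

    const std = @import("std");

    test "std.ascii helpers match the old comparisons" {
        var c: u8 = 0;
        while (c < 0x80) : (c += 1) {
            try std.testing.expect(std.ascii.isASCII(c)); // old: c < 0x80
            // old: c < 0x20 or c == 0x7f
            try std.testing.expectEqual(c < 0x20 or c == 0x7f, std.ascii.isCntrl(c));
        }
    }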
@@ -1465,8 +1468,8 @@ pub const Tokenizer = struct {
}
};
-test "tokenizer" {
- try testTokenize("test", &.{.keyword_test});
+test "keywords" {
+ try testTokenize("test const else", &.{ .keyword_test, .keyword_const, .keyword_else });
}
test "line comment followed by top-level comptime" {
@@ -1481,7 +1484,7 @@ test "line comment followed by top-level comptime" {
});
}
-test "tokenizer - unknown length pointer and then c pointer" {
+test "unknown length pointer and then c pointer" {
try testTokenize(
\\[*]u8
\\[*c]u8
@@ -1498,7 +1501,7 @@ test "tokenizer - unknown length pointer and then c pointer" {
});
}
-test "tokenizer - code point literal with hex escape" {
+test "code point literal with hex escape" {
try testTokenize(
\\'\x1b'
, &.{.char_literal});
@@ -1507,7 +1510,21 @@ test "tokenizer - code point literal with hex escape" {
, &.{ .invalid, .invalid });
}
-test "tokenizer - code point literal with unicode escapes" {
+test "newline in char literal" {
+ try testTokenize(
+ \\'
+ \\'
+ , &.{ .invalid, .invalid });
+}
+
+test "newline in string literal" {
+ try testTokenize(
+ \\"
+ \\"
+ , &.{ .invalid, .string_literal });
+}
+
+test "code point literal with unicode escapes" {
// Valid unicode escapes
try testTokenize(
\\'\u{3}'
@@ -1557,13 +1574,13 @@ test "tokenizer - code point literal with unicode escapes" {
, &.{ .invalid, .integer_literal, .invalid });
}
-test "tokenizer - code point literal with unicode code point" {
+test "code point literal with unicode code point" {
try testTokenize(
\\'💩'
, &.{.char_literal});
}
-test "tokenizer - float literal e exponent" {
+test "float literal e exponent" {
try testTokenize("a = 4.94065645841246544177e-324;\n", &.{
.identifier,
.equal,
@@ -1572,7 +1589,7 @@ test "tokenizer - float literal e exponent" {
});
}
-test "tokenizer - float literal p exponent" {
+test "float literal p exponent" {
try testTokenize("a = 0x1.a827999fcef32p+1022;\n", &.{
.identifier,
.equal,
@@ -1581,11 +1598,11 @@ test "tokenizer - float literal p exponent" {
});
}
-test "tokenizer - chars" {
+test "chars" {
try testTokenize("'c'", &.{.char_literal});
}
-test "tokenizer - invalid token characters" {
+test "invalid token characters" {
try testTokenize("#", &.{.invalid});
try testTokenize("`", &.{.invalid});
try testTokenize("'c", &.{.invalid});
@@ -1593,7 +1610,7 @@ test "tokenizer - invalid token characters" {
try testTokenize("''", &.{ .invalid, .invalid });
}
-test "tokenizer - invalid literal/comment characters" {
+test "invalid literal/comment characters" {
try testTokenize("\"\x00\"", &.{
.string_literal,
.invalid,
@@ -1609,12 +1626,12 @@ test "tokenizer - invalid literal/comment characters" {
});
}
-test "tokenizer - utf8" {
+test "utf8" {
try testTokenize("//\xc2\x80", &.{});
try testTokenize("//\xf4\x8f\xbf\xbf", &.{});
}
-test "tokenizer - invalid utf8" {
+test "invalid utf8" {
try testTokenize("//\x80", &.{
.invalid,
});
@@ -1641,7 +1658,7 @@ test "tokenizer - invalid utf8" {
});
}
-test "tokenizer - illegal unicode codepoints" {
+test "illegal unicode codepoints" {
// unicode newline characters: U+0085, U+2028, U+2029
try testTokenize("//\xc2\x84", &.{});
try testTokenize("//\xc2\x85", &.{
@@ -1658,7 +1675,7 @@ test "tokenizer - illegal unicode codepoints" {
try testTokenize("//\xe2\x80\xaa", &.{});
}
-test "tokenizer - string identifier and builtin fns" {
+test "string identifier and builtin fns" {
try testTokenize(
\\const @"if" = @import("std");
, &.{
@@ -1673,7 +1690,7 @@ test "tokenizer - string identifier and builtin fns" {
});
}
-test "tokenizer - multiline string literal with literal tab" {
+test "multiline string literal with literal tab" {
try testTokenize(
\\\\foo bar
, &.{
@@ -1681,7 +1698,7 @@ test "tokenizer - multiline string literal with literal tab" {
});
}
-test "tokenizer - comments with literal tab" {
+test "comments with literal tab" {
try testTokenize(
\\//foo bar
\\//!foo bar
@@ -1697,14 +1714,14 @@ test "tokenizer - comments with literal tab" {
});
}
-test "tokenizer - pipe and then invalid" {
+test "pipe and then invalid" {
try testTokenize("||=", &.{
.pipe_pipe,
.equal,
});
}
-test "tokenizer - line comment and doc comment" {
+test "line comment and doc comment" {
try testTokenize("//", &.{});
try testTokenize("// a / b", &.{});
try testTokenize("// /", &.{});
@@ -1715,7 +1732,7 @@ test "tokenizer - line comment and doc comment" {
try testTokenize("//!!", &.{.container_doc_comment});
}
-test "tokenizer - line comment followed by identifier" {
+test "line comment followed by identifier" {
try testTokenize(
\\ Unexpected,
\\ // another
@@ -1728,7 +1745,7 @@ test "tokenizer - line comment followed by identifier" {
});
}
-test "tokenizer - UTF-8 BOM is recognized and skipped" {
+test "UTF-8 BOM is recognized and skipped" {
try testTokenize("\xEF\xBB\xBFa;\n", &.{
.identifier,
.semicolon,
@@ -1770,7 +1787,7 @@ test "correctly parse pointer dereference followed by asterisk" {
});
}
-test "tokenizer - range literals" {
+test "range literals" {
try testTokenize("0...9", &.{ .integer_literal, .ellipsis3, .integer_literal });
try testTokenize("'0'...'9'", &.{ .char_literal, .ellipsis3, .char_literal });
try testTokenize("0x00...0x09", &.{ .integer_literal, .ellipsis3, .integer_literal });
@@ -1778,7 +1795,7 @@ test "tokenizer - range literals" {
try testTokenize("0o00...0o11", &.{ .integer_literal, .ellipsis3, .integer_literal });
}
-test "tokenizer - number literals decimal" {
+test "number literals decimal" {
try testTokenize("0", &.{.integer_literal});
try testTokenize("1", &.{.integer_literal});
try testTokenize("2", &.{.integer_literal});
@@ -1845,7 +1862,7 @@ test "tokenizer - number literals decimal" {
try testTokenize("1.0e0_+", &.{ .invalid, .plus });
}
-test "tokenizer - number literals binary" {
+test "number literals binary" {
try testTokenize("0b0", &.{.integer_literal});
try testTokenize("0b1", &.{.integer_literal});
try testTokenize("0b2", &.{ .invalid, .integer_literal });
@@ -1884,7 +1901,7 @@ test "tokenizer - number literals binary" {
try testTokenize("0b1_,", &.{ .invalid, .comma });
}
-test "tokenizer - number literals octal" {
+test "number literals octal" {
try testTokenize("0o0", &.{.integer_literal});
try testTokenize("0o1", &.{.integer_literal});
try testTokenize("0o2", &.{.integer_literal});
@@ -1923,7 +1940,7 @@ test "tokenizer - number literals octal" {
try testTokenize("0o_,", &.{ .invalid, .identifier, .comma });
}
-test "tokenizer - number literals hexadecimal" {
+test "number literals hexadecimal" {
try testTokenize("0x0", &.{.integer_literal});
try testTokenize("0x1", &.{.integer_literal});
try testTokenize("0x2", &.{.integer_literal});
@@ -2011,22 +2028,22 @@ test "tokenizer - number literals hexadecimal" {
try testTokenize("0x0.0p0_", &.{ .invalid, .eof });
}
-test "tokenizer - multi line string literal with only 1 backslash" {
+test "multi line string literal with only 1 backslash" {
try testTokenize("x \\\n;", &.{ .identifier, .invalid, .semicolon });
}
-test "tokenizer - invalid builtin identifiers" {
+test "invalid builtin identifiers" {
try testTokenize("@()", &.{ .invalid, .l_paren, .r_paren });
try testTokenize("@0()", &.{ .invalid, .integer_literal, .l_paren, .r_paren });
}
-test "tokenizer - invalid token with unfinished escape right before eof" {
+test "invalid token with unfinished escape right before eof" {
try testTokenize("\"\\", &.{.invalid});
try testTokenize("'\\", &.{.invalid});
try testTokenize("'\\u", &.{.invalid});
}
-test "tokenizer - saturating" {
+test "saturating operators" {
try testTokenize("<<", &.{.angle_bracket_angle_bracket_left});
try testTokenize("<<|", &.{.angle_bracket_angle_bracket_left_pipe});
try testTokenize("<<|=", &.{.angle_bracket_angle_bracket_left_pipe_equal});
@@ -2044,17 +2061,14 @@ test "tokenizer - saturating" {
try testTokenize("-|=", &.{.minus_pipe_equal});
}
-fn testTokenize(source: [:0]const u8, expected_tokens: []const Token.Tag) !void {
+fn testTokenize(source: [:0]const u8, expected_token_tags: []const Token.Tag) !void {
var tokenizer = Tokenizer.init(source);
- for (expected_tokens) |expected_token_id| {
+ for (expected_token_tags) |expected_token_tag| {
const token = tokenizer.next();
- if (token.tag != expected_token_id) {
- std.debug.panic("expected {s}, found {s}\n", .{
- @tagName(expected_token_id), @tagName(token.tag),
- });
- }
+ try std.testing.expectEqual(expected_token_tag, token.tag);
}
const last_token = tokenizer.next();
try std.testing.expectEqual(Token.Tag.eof, last_token.tag);
try std.testing.expectEqual(source.len, last_token.loc.start);
+ try std.testing.expectEqual(source.len, last_token.loc.end);
}
diff --git a/src/Air.zig b/src/Air.zig
index 302822fc99..e08993bbed 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -660,6 +660,10 @@ pub const Inst = struct {
/// Uses the `pl_op` field with payload `AtomicRmw`. Operand is `ptr`.
atomic_rmw,
+ /// Returns true if enum tag value has a name.
+ /// Uses the `un_op` field.
+ is_named_enum_value,
+
/// Given an enum tag value, returns the tag name. The enum type may be non-exhaustive.
/// Result type is always `[:0]const u8`.
/// Uses the `un_op` field.
@@ -669,6 +673,10 @@ pub const Inst = struct {
/// Uses the `un_op` field.
error_name,
+ /// Returns true if error set has error with value.
+ /// Uses the `ty_op` field.
+ error_set_has_value,
+
/// Constructs a vector, tuple, struct, or array value out of runtime-known elements.
/// Some of the elements may be comptime-known.
/// Uses the `ty_pl` field, payload is index of an array of elements, each of which
@@ -1057,6 +1065,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.is_non_err,
.is_err_ptr,
.is_non_err_ptr,
+ .is_named_enum_value,
+ .error_set_has_value,
=> return Type.bool,
.const_ty => return Type.type,
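A sketch of the kind of source these new boolean instructions back (assumed motivation, inferred from the doc comments above: safety checks for `@tagName` on non-exhaustive enums, and the error-set counterpart for error values):

    const std = @import("std");

    test "tagName on a non-exhaustive enum" {
        const E = enum(u8) { a, b, _ };
        const e: E = .a;
        // For a runtime value, the compiler can emit is_named_enum_value
        // as a safety check before resolving the tag name.
        try std.testing.expectEqualStrings("a", @tagName(e));
    }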
diff --git a/src/AstGen.zig b/src/AstGen.zig
index e30913ac76..943d0aad08 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -152,6 +152,7 @@ pub fn generate(gpa: Allocator, tree: Ast) Allocator.Error!Zir {
0,
tree.containerDeclRoot(),
.Auto,
+ 0,
)) |struct_decl_ref| {
assert(refToIndex(struct_decl_ref).? == 0);
} else |err| switch (err) {
@@ -859,7 +860,12 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr
},
.enum_literal => return simpleStrTok(gz, rl, main_tokens[node], node, .enum_literal),
.error_value => return simpleStrTok(gz, rl, node_datas[node].rhs, node, .error_value),
- .anyframe_literal => return rvalue(gz, rl, .anyframe_type, node),
+ // TODO restore this when implementing https://github.com/ziglang/zig/issues/6025
+ // .anyframe_literal => return rvalue(gz, rl, .anyframe_type, node),
+ .anyframe_literal => {
+ const result = try gz.addUnNode(.anyframe_type, .void_type, node);
+ return rvalue(gz, rl, result, node);
+ },
.anyframe_type => {
const return_type = try typeExpr(gz, scope, node_datas[node].rhs);
const result = try gz.addUnNode(.anyframe_type, return_type, node);
@@ -1158,6 +1164,10 @@ fn fnProtoExpr(
const tree = astgen.tree;
const token_tags = tree.tokens.items(.tag);
+ if (fn_proto.name_token) |some| {
+ return astgen.failTok(some, "function type cannot have a name", .{});
+ }
+
const is_extern = blk: {
const maybe_extern_token = fn_proto.extern_export_inline_token orelse break :blk false;
break :blk token_tags[maybe_extern_token] == .keyword_extern;
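The new check rejects named prototypes in expression position; a sketch of what now fails versus what still compiles:

    // error: function type cannot have a name
    //   const F = fn foo(u8) void;

    // OK: anonymous function type
    const G = fn (u8) void;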
@@ -2449,7 +2459,6 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.trunc,
.round,
.tag_name,
- .reify,
.type_name,
.frame_type,
.frame_size,
@@ -2496,7 +2505,6 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.closure_get,
.array_base_ptr,
.field_base_ptr,
- .param_type,
.ret_ptr,
.ret_type,
.@"try",
@@ -3066,6 +3074,19 @@ fn emitDbgNode(gz: *GenZir, node: Ast.Node.Index) !void {
const line = astgen.source_line - gz.decl_line;
const column = astgen.source_column;
+ if (gz.instructions.items.len > 0) {
+ const last = gz.instructions.items[gz.instructions.items.len - 1];
+ const zir_tags = astgen.instructions.items(.tag);
+ if (zir_tags[last] == .dbg_stmt) {
+ const zir_datas = astgen.instructions.items(.data);
+ zir_datas[last].dbg_stmt = .{
+ .line = line,
+ .column = column,
+ };
+ return;
+ }
+ }
+
_ = try gz.add(.{ .tag = .dbg_stmt, .data = .{
.dbg_stmt = .{
.line = line,
@@ -4071,6 +4092,13 @@ fn testDecl(
true => .signed,
false => .unsigned,
};
+ if (ident_name_raw.len >= 3 and ident_name_raw[1] == '0') {
+ return astgen.failTok(
+ test_name_token,
+ "primitive integer type '{s}' has leading zero",
+ .{ident_name_raw},
+ );
+ }
_ = parseBitCount(ident_name_raw[1..]) catch |err| switch (err) {
error.Overflow => return astgen.failTok(
test_name_token,
@@ -4207,15 +4235,18 @@ fn structDeclInner(
node: Ast.Node.Index,
container_decl: Ast.full.ContainerDecl,
layout: std.builtin.Type.ContainerLayout,
+ backing_int_node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const decl_inst = try gz.reserveInstructionIndex();
- if (container_decl.ast.members.len == 0) {
+ if (container_decl.ast.members.len == 0 and backing_int_node == 0) {
try gz.setStruct(decl_inst, .{
.src_node = node,
.layout = layout,
.fields_len = 0,
.decls_len = 0,
+ .backing_int_ref = .none,
+ .backing_int_body_len = 0,
.known_non_opv = false,
.known_comptime_only = false,
});
@@ -4238,10 +4269,13 @@ fn structDeclInner(
// are in scope, so that field types, alignments, and default value expressions
// can refer to decls within the struct itself.
astgen.advanceSourceCursorToNode(node);
+ // If `node == 0` then this is the root struct and all the declarations should
+ // be relative to the beginning of the file.
+ const decl_line = if (node == 0) 0 else astgen.source_line;
var block_scope: GenZir = .{
.parent = &namespace.base,
.decl_node_index = node,
- .decl_line = astgen.source_line,
+ .decl_line = decl_line,
.astgen = astgen,
.force_comptime = true,
.in_defer = false,
@@ -4250,6 +4284,35 @@ fn structDeclInner(
};
defer block_scope.unstack();
+ const scratch_top = astgen.scratch.items.len;
+ defer astgen.scratch.items.len = scratch_top;
+
+ var backing_int_body_len: usize = 0;
+ const backing_int_ref: Zir.Inst.Ref = blk: {
+ if (backing_int_node != 0) {
+ if (layout != .Packed) {
+ return astgen.failNode(backing_int_node, "non-packed struct does not support backing integer type", .{});
+ } else {
+ const backing_int_ref = try typeExpr(&block_scope, &namespace.base, backing_int_node);
+ if (!block_scope.isEmpty()) {
+ if (!block_scope.endsWithNoReturn()) {
+ _ = try block_scope.addBreak(.break_inline, decl_inst, backing_int_ref);
+ }
+
+ const body = block_scope.instructionsSlice();
+ const old_scratch_len = astgen.scratch.items.len;
+ try astgen.scratch.ensureUnusedCapacity(gpa, countBodyLenAfterFixups(astgen, body));
+ appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body);
+ backing_int_body_len = astgen.scratch.items.len - old_scratch_len;
+ block_scope.instructions.items.len = block_scope.instructions_top;
+ }
+ break :blk backing_int_ref;
+ }
+ } else {
+ break :blk .none;
+ }
+ };
+
const decl_count = try astgen.scanDecls(&namespace, container_decl.ast.members);
const field_count = @intCast(u32, container_decl.ast.members.len - decl_count);
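The `backing_int_node` plumbing above corresponds to the explicit backing-integer syntax for packed structs; a sketch of the source it enables, and of the new error for non-packed structs:

    // OK: 1 + 1 + 6 bits, backed by an explicit u8
    const Flags = packed struct(u8) {
        a: bool,
        b: bool,
        _pad: u6 = 0,
    };

    // error: non-packed struct does not support backing integer type
    //   const Bad = struct(u8) { x: u8 };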
@@ -4274,7 +4337,7 @@ fn structDeclInner(
var known_non_opv = false;
var known_comptime_only = false;
for (container_decl.ast.members) |member_node| {
- const member = switch (try containerMember(gz, &namespace.base, &wip_members, member_node)) {
+ const member = switch (try containerMember(&block_scope, &namespace.base, &wip_members, member_node)) {
.decl => continue,
.field => |field| field,
};
@@ -4362,6 +4425,8 @@ fn structDeclInner(
.layout = layout,
.fields_len = field_count,
.decls_len = decl_count,
+ .backing_int_ref = backing_int_ref,
+ .backing_int_body_len = @intCast(u32, backing_int_body_len),
.known_non_opv = known_non_opv,
.known_comptime_only = known_comptime_only,
});
@@ -4370,7 +4435,9 @@ fn structDeclInner(
const decls_slice = wip_members.declsSlice();
const fields_slice = wip_members.fieldsSlice();
const bodies_slice = astgen.scratch.items[bodies_start..];
- try astgen.extra.ensureUnusedCapacity(gpa, decls_slice.len + fields_slice.len + bodies_slice.len);
+ try astgen.extra.ensureUnusedCapacity(gpa, backing_int_body_len +
+ decls_slice.len + fields_slice.len + bodies_slice.len);
+ astgen.extra.appendSliceAssumeCapacity(astgen.scratch.items[scratch_top..][0..backing_int_body_len]);
astgen.extra.appendSliceAssumeCapacity(decls_slice);
astgen.extra.appendSliceAssumeCapacity(fields_slice);
astgen.extra.appendSliceAssumeCapacity(bodies_slice);
@@ -4441,7 +4508,7 @@ fn unionDeclInner(
defer wip_members.deinit();
for (members) |member_node| {
- const member = switch (try containerMember(gz, &namespace.base, &wip_members, member_node)) {
+ const member = switch (try containerMember(&block_scope, &namespace.base, &wip_members, member_node)) {
.decl => continue,
.field => |field| field,
};
@@ -4504,9 +4571,6 @@ fn unionDeclInner(
wip_members.appendToField(@enumToInt(tag_value));
}
}
- if (field_count == 0) {
- return astgen.failNode(node, "union declarations must have at least one tag", .{});
- }
if (!block_scope.isEmpty()) {
_ = try block_scope.addBreak(.break_inline, decl_inst, .void_value);
@@ -4566,9 +4630,7 @@ fn containerDecl(
else => unreachable,
} else std.builtin.Type.ContainerLayout.Auto;
- assert(container_decl.ast.arg == 0);
-
- const result = try structDeclInner(gz, scope, node, container_decl, layout);
+ const result = try structDeclInner(gz, scope, node, container_decl, layout, container_decl.ast.arg);
return rvalue(gz, rl, result, node);
},
.keyword_union => {
@@ -4664,12 +4726,6 @@ fn containerDecl(
.nonexhaustive_node = nonexhaustive_node,
};
};
- if (counts.total_fields == 0 and counts.nonexhaustive_node == 0) {
- // One can construct an enum with no tags, and it functions the same as `noreturn`. But
- // this is only useful for generic code; when explicitly using `enum {}` syntax, there
- // must be at least one tag.
- try astgen.appendErrorNode(node, "enum declarations must have at least one tag", .{});
- }
if (counts.nonexhaustive_node != 0 and container_decl.ast.arg == 0) {
try astgen.appendErrorNodeNotes(
node,
@@ -4728,7 +4784,7 @@ fn containerDecl(
for (container_decl.ast.members) |member_node| {
if (member_node == counts.nonexhaustive_node)
continue;
- const member = switch (try containerMember(gz, &namespace.base, &wip_members, member_node)) {
+ const member = switch (try containerMember(&block_scope, &namespace.base, &wip_members, member_node)) {
.decl => continue,
.field => |field| field,
};
@@ -4806,13 +4862,26 @@ fn containerDecl(
};
defer namespace.deinit(gpa);
+ astgen.advanceSourceCursorToNode(node);
+ var block_scope: GenZir = .{
+ .parent = &namespace.base,
+ .decl_node_index = node,
+ .decl_line = astgen.source_line,
+ .astgen = astgen,
+ .force_comptime = true,
+ .in_defer = false,
+ .instructions = gz.instructions,
+ .instructions_top = gz.instructions.items.len,
+ };
+ defer block_scope.unstack();
+
const decl_count = try astgen.scanDecls(&namespace, container_decl.ast.members);
var wip_members = try WipMembers.init(gpa, &astgen.scratch, decl_count, 0, 0, 0);
defer wip_members.deinit();
for (container_decl.ast.members) |member_node| {
- const res = try containerMember(gz, &namespace.base, &wip_members, member_node);
+ const res = try containerMember(&block_scope, &namespace.base, &wip_members, member_node);
if (res == .field) {
return astgen.failNode(member_node, "opaque types cannot have fields", .{});
}
@@ -5033,6 +5102,16 @@ fn tryExpr(
if (parent_gz.in_defer) return astgen.failNode(node, "'try' not allowed inside defer expression", .{});
+ // Ensure debug line/column information is emitted for this try expression.
+ // Then we will save the line/column so that we can emit another one that goes
+ // "backwards" because we want to evaluate the operand, but then put the debug
+ // info back at the try keyword for error return tracing.
+ if (!parent_gz.force_comptime) {
+ try emitDbgNode(parent_gz, node);
+ }
+ const try_line = astgen.source_line - parent_gz.decl_line;
+ const try_column = astgen.source_column;
+
const operand_rl: ResultLoc = switch (rl) {
.ref => .ref,
else => .none,
@@ -5062,6 +5141,7 @@ fn tryExpr(
};
const err_code = try else_scope.addUnNode(err_tag, operand, node);
try genDefers(&else_scope, &fn_block.base, scope, .{ .both = err_code });
+ try emitDbgStmt(&else_scope, try_line, try_column);
_ = try else_scope.addUnNode(.ret_node, err_code, node);
try else_scope.setTryBody(try_inst, operand);
@@ -6568,6 +6648,16 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref
if (gz.in_defer) return astgen.failNode(node, "cannot return from defer expression", .{});
+ // Ensure debug line/column information is emitted for this return expression.
+ // Then we will save the line/column so that we can emit another one that goes
+ // "backwards" because we want to evaluate the operand, but then put the debug
+ // info back at the return keyword for error return tracing.
+ if (!gz.force_comptime) {
+ try emitDbgNode(gz, node);
+ }
+ const ret_line = astgen.source_line - gz.decl_line;
+ const ret_column = astgen.source_column;
+
const defer_outer = &astgen.fn_block.?.base;
const operand_node = node_datas[node].lhs;
@@ -6586,11 +6676,13 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref
const defer_counts = countDefers(astgen, defer_outer, scope);
if (!defer_counts.need_err_code) {
try genDefers(gz, defer_outer, scope, .both_sans_err);
+ try emitDbgStmt(gz, ret_line, ret_column);
_ = try gz.addStrTok(.ret_err_value, err_name_str_index, ident_token);
return Zir.Inst.Ref.unreachable_value;
}
const err_code = try gz.addStrTok(.ret_err_value_code, err_name_str_index, ident_token);
try genDefers(gz, defer_outer, scope, .{ .both = err_code });
+ try emitDbgStmt(gz, ret_line, ret_column);
_ = try gz.addUnNode(.ret_node, err_code, node);
return Zir.Inst.Ref.unreachable_value;
}
@@ -6609,6 +6701,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref
.never => {
// Returning a value that cannot be an error; skip error defers.
try genDefers(gz, defer_outer, scope, .normal_only);
+ try emitDbgStmt(gz, ret_line, ret_column);
try gz.addRet(rl, operand, node);
return Zir.Inst.Ref.unreachable_value;
},
@@ -6616,6 +6709,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref
// Value is always an error. Emit both error defers and regular defers.
const err_code = if (rl == .ptr) try gz.addUnNode(.load, rl.ptr, node) else operand;
try genDefers(gz, defer_outer, scope, .{ .both = err_code });
+ try emitDbgStmt(gz, ret_line, ret_column);
try gz.addRet(rl, operand, node);
return Zir.Inst.Ref.unreachable_value;
},
@@ -6624,6 +6718,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref
if (!defer_counts.have_err) {
// Only regular defers; no branch needed.
try genDefers(gz, defer_outer, scope, .normal_only);
+ try emitDbgStmt(gz, ret_line, ret_column);
try gz.addRet(rl, operand, node);
return Zir.Inst.Ref.unreachable_value;
}
@@ -6637,6 +6732,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref
defer then_scope.unstack();
try genDefers(&then_scope, defer_outer, scope, .normal_only);
+ try emitDbgStmt(&then_scope, ret_line, ret_column);
try then_scope.addRet(rl, operand, node);
var else_scope = gz.makeSubBlock(scope);
@@ -6646,6 +6742,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref
.both = try else_scope.addUnNode(.err_union_code, result, node),
};
try genDefers(&else_scope, defer_outer, scope, which_ones);
+ try emitDbgStmt(&else_scope, ret_line, ret_column);
try else_scope.addRet(rl, operand, node);
try setCondBrPayload(condbr, is_non_err, &then_scope, 0, &else_scope, 0);
@@ -6708,6 +6805,13 @@ fn identifier(
true => .signed,
false => .unsigned,
};
+ if (ident_name_raw.len >= 3 and ident_name_raw[1] == '0') {
+ return astgen.failNode(
+ ident,
+ "primitive integer type '{s}' has leading zero",
+ .{ident_name_raw},
+ );
+ }
const bit_count = parseBitCount(ident_name_raw[1..]) catch |err| switch (err) {
error.Overflow => return astgen.failNode(
ident,
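Sketch of what the new identifier check rejects:

    // error: primitive integer type 'u08' has leading zero
    //   const T = u08;

    // OK
    const T = u8;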
@@ -6938,17 +7042,6 @@ fn integerLiteral(gz: *GenZir, rl: ResultLoc, node: Ast.Node.Index) InnerError!Z
const main_tokens = tree.nodes.items(.main_token);
const int_token = main_tokens[node];
const prefixed_bytes = tree.tokenSlice(int_token);
- if (std.fmt.parseInt(u64, prefixed_bytes, 0)) |small_int| {
- const result: Zir.Inst.Ref = switch (small_int) {
- 0 => .zero,
- 1 => .one,
- else => try gz.addInt(small_int),
- };
- return rvalue(gz, rl, result, node);
- } else |err| switch (err) {
- error.InvalidCharacter => unreachable, // Caught by the parser.
- error.Overflow => {},
- }
var base: u8 = 10;
var non_prefixed: []const u8 = prefixed_bytes;
@@ -6963,6 +7056,24 @@ fn integerLiteral(gz: *GenZir, rl: ResultLoc, node: Ast.Node.Index) InnerError!Z
non_prefixed = prefixed_bytes[2..];
}
+ if (base == 10 and prefixed_bytes.len >= 2 and prefixed_bytes[0] == '0') {
+ return astgen.failNodeNotes(node, "integer literal '{s}' has leading zero", .{prefixed_bytes}, &.{
+ try astgen.errNoteNode(node, "use '0o' prefix for octal literals", .{}),
+ });
+ }
+
+ if (std.fmt.parseUnsigned(u64, non_prefixed, base)) |small_int| {
+ const result: Zir.Inst.Ref = switch (small_int) {
+ 0 => .zero,
+ 1 => .one,
+ else => try gz.addInt(small_int),
+ };
+ return rvalue(gz, rl, result, node);
+ } else |err| switch (err) {
+ error.InvalidCharacter => unreachable, // Caught by the parser.
+ error.Overflow => {},
+ }
+
const gpa = astgen.gpa;
var big_int = try std.math.big.int.Managed.init(gpa);
defer big_int.deinit();
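And the corresponding literal check: decimal literals with a leading zero are now errors that point the user at the octal prefix:

    // error: integer literal '0777' has leading zero
    // note: use '0o' prefix for octal literals
    //   const x = 0777;

    // OK
    const x = 0o777;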
@@ -7548,7 +7659,6 @@ fn builtinCall(
.trunc => return simpleUnOp(gz, scope, rl, node, .none, params[0], .trunc),
.round => return simpleUnOp(gz, scope, rl, node, .none, params[0], .round),
.tag_name => return simpleUnOp(gz, scope, rl, node, .none, params[0], .tag_name),
- .Type => return simpleUnOp(gz, scope, rl, node, .{ .coerced_ty = .type_info_type }, params[0], .reify),
.type_name => return simpleUnOp(gz, scope, rl, node, .none, params[0], .type_name),
.Frame => return simpleUnOp(gz, scope, rl, node, .none, params[0], .frame_type),
.frame_size => return simpleUnOp(gz, scope, rl, node, .none, params[0], .frame_size),
@@ -7563,6 +7673,31 @@ fn builtinCall(
.truncate => return typeCast(gz, scope, rl, node, params[0], params[1], .truncate),
// zig fmt: on
+ .Type => {
+ const operand = try expr(gz, scope, .{ .coerced_ty = .type_info_type }, params[0]);
+
+ const gpa = gz.astgen.gpa;
+
+ try gz.instructions.ensureUnusedCapacity(gpa, 1);
+ try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1);
+
+ const payload_index = try gz.astgen.addExtra(Zir.Inst.UnNode{
+ .node = gz.nodeIndexToRelative(node),
+ .operand = operand,
+ });
+ const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
+ gz.astgen.instructions.appendAssumeCapacity(.{
+ .tag = .extended,
+ .data = .{ .extended = .{
+ .opcode = .reify,
+ .small = @enumToInt(gz.anon_name_strategy),
+ .operand = payload_index,
+ } },
+ });
+ gz.instructions.appendAssumeCapacity(new_index);
+ const result = indexToRef(new_index);
+ return rvalue(gz, rl, result, node);
+ },
.panic => {
try emitDbgNode(gz, node);
return simpleUnOp(gz, scope, rl, node, .{ .ty = .const_slice_u8_type }, params[0], if (gz.force_comptime) .panic_comptime else .panic);
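With `.reify` now an extended instruction carrying the anonymous-name strategy, `@Type` lowering changes shape but not semantics; a small usage sketch:

    const std = @import("std");

    test "reify an integer type via @Type" {
        const U7 = @Type(.{ .Int = .{ .signedness = .unsigned, .bits = 7 } });
        try std.testing.expect(@bitSizeOf(U7) == 7);
    }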
@@ -7605,11 +7740,11 @@ fn builtinCall(
.has_decl => return hasDeclOrField(gz, scope, rl, node, params[0], params[1], .has_decl),
.has_field => return hasDeclOrField(gz, scope, rl, node, params[0], params[1], .has_field),
- .clz => return bitBuiltin(gz, scope, rl, node, params[0], params[1], .clz),
- .ctz => return bitBuiltin(gz, scope, rl, node, params[0], params[1], .ctz),
- .pop_count => return bitBuiltin(gz, scope, rl, node, params[0], params[1], .pop_count),
- .byte_swap => return bitBuiltin(gz, scope, rl, node, params[0], params[1], .byte_swap),
- .bit_reverse => return bitBuiltin(gz, scope, rl, node, params[0], params[1], .bit_reverse),
+ .clz => return bitBuiltin(gz, scope, rl, node, params[0], .clz),
+ .ctz => return bitBuiltin(gz, scope, rl, node, params[0], .ctz),
+ .pop_count => return bitBuiltin(gz, scope, rl, node, params[0], .pop_count),
+ .byte_swap => return bitBuiltin(gz, scope, rl, node, params[0], .byte_swap),
+ .bit_reverse => return bitBuiltin(gz, scope, rl, node, params[0], .bit_reverse),
.div_exact => return divBuiltin(gz, scope, rl, node, params[0], params[1], .div_exact),
.div_floor => return divBuiltin(gz, scope, rl, node, params[0], params[1], .div_floor),
@@ -7972,17 +8107,9 @@ fn bitBuiltin(
scope: *Scope,
rl: ResultLoc,
node: Ast.Node.Index,
- int_type_node: Ast.Node.Index,
operand_node: Ast.Node.Index,
tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
- // The accepted proposal https://github.com/ziglang/zig/issues/6835
- // tells us to remove the type parameter from these builtins. To stay
- // source-compatible with stage1, we still observe the parameter here,
- // but we do not encode it into the ZIR. To implement this proposal in
- // stage2, only AstGen code will need to be changed.
- _ = try typeExpr(gz, scope, int_type_node);
-
const operand = try expr(gz, scope, .none, operand_node);
const result = try gz.addUnNode(tag, operand, node);
return rvalue(gz, rl, result, node);
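Per the accepted proposal (ziglang/zig#6835) referenced in the deleted comment, the type parameter is gone from the bit builtins; a sketch of the new call forms:

    const std = @import("std");

    test "bit builtins without a type parameter" {
        const x: u8 = 0b0001_0000;
        // previously: @clz(u8, x), @ctz(u8, x), @popCount(u8, x)
        try std.testing.expectEqual(@as(u4, 3), @clz(x));
        try std.testing.expectEqual(@as(u4, 4), @ctz(x));
        try std.testing.expectEqual(@as(u4, 1), @popCount(x));
    }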
@@ -8147,6 +8274,33 @@ fn callExpr(
assert(callee != .none);
assert(node != 0);
+ const call_index = @intCast(Zir.Inst.Index, astgen.instructions.len);
+ const call_inst = Zir.indexToRef(call_index);
+ try gz.astgen.instructions.append(astgen.gpa, undefined);
+ try gz.instructions.append(astgen.gpa, call_index);
+
+ const scratch_top = astgen.scratch.items.len;
+ defer astgen.scratch.items.len = scratch_top;
+
+ var scratch_index = scratch_top;
+ try astgen.scratch.resize(astgen.gpa, scratch_top + call.ast.params.len);
+
+ for (call.ast.params) |param_node| {
+ var arg_block = gz.makeSubBlock(scope);
+ defer arg_block.unstack();
+
+ // `call_inst` is reused to provide the param type.
+ const arg_ref = try expr(&arg_block, &arg_block.base, .{ .coerced_ty = call_inst }, param_node);
+ _ = try arg_block.addBreak(.break_inline, call_index, arg_ref);
+
+ const body = arg_block.instructionsSlice();
+ try astgen.scratch.ensureUnusedCapacity(astgen.gpa, countBodyLenAfterFixups(astgen, body));
+ appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body);
+
+ astgen.scratch.items[scratch_index] = @intCast(u32, astgen.scratch.items.len - scratch_top);
+ scratch_index += 1;
+ }
+
const payload_index = try addExtra(astgen, Zir.Inst.Call{
.callee = callee,
.flags = .{
@@ -8154,22 +8308,16 @@ fn callExpr(
.args_len = @intCast(Zir.Inst.Call.Flags.PackedArgsLen, call.ast.params.len),
},
});
- var extra_index = try reserveExtra(astgen, call.ast.params.len);
-
- for (call.ast.params) |param_node, i| {
- const param_type = try gz.add(.{
- .tag = .param_type,
- .data = .{ .param_type = .{
- .callee = callee,
- .param_index = @intCast(u32, i),
- } },
- });
- const arg_ref = try expr(gz, scope, .{ .coerced_ty = param_type }, param_node);
- astgen.extra.items[extra_index] = @enumToInt(arg_ref);
- extra_index += 1;
+ if (call.ast.params.len != 0) {
+ try astgen.extra.appendSlice(astgen.gpa, astgen.scratch.items[scratch_top..]);
}
-
- const call_inst = try gz.addPlNodePayloadIndex(.call, node, payload_index);
+ gz.astgen.instructions.set(call_index, .{
+ .tag = .call,
+ .data = .{ .pl_node = .{
+ .src_node = gz.nodeIndexToRelative(node),
+ .payload_index = payload_index,
+ } },
+ });
return rvalue(gz, rl, call_inst, node); // TODO function call with result location
}
@@ -11153,6 +11301,8 @@ const GenZir = struct {
src_node: Ast.Node.Index,
fields_len: u32,
decls_len: u32,
+ backing_int_ref: Zir.Inst.Ref,
+ backing_int_body_len: u32,
layout: std.builtin.Type.ContainerLayout,
known_non_opv: bool,
known_comptime_only: bool,
@@ -11160,7 +11310,7 @@ const GenZir = struct {
const astgen = gz.astgen;
const gpa = astgen.gpa;
- try astgen.extra.ensureUnusedCapacity(gpa, 4);
+ try astgen.extra.ensureUnusedCapacity(gpa, 6);
const payload_index = @intCast(u32, astgen.extra.items.len);
if (args.src_node != 0) {
@@ -11173,6 +11323,12 @@ const GenZir = struct {
if (args.decls_len != 0) {
astgen.extra.appendAssumeCapacity(args.decls_len);
}
+ if (args.backing_int_ref != .none) {
+ astgen.extra.appendAssumeCapacity(args.backing_int_body_len);
+ if (args.backing_int_body_len == 0) {
+ astgen.extra.appendAssumeCapacity(@enumToInt(args.backing_int_ref));
+ }
+ }
astgen.instructions.set(inst, .{
.tag = .extended,
.data = .{ .extended = .{
@@ -11181,6 +11337,7 @@ const GenZir = struct {
.has_src_node = args.src_node != 0,
.has_fields_len = args.fields_len != 0,
.has_decls_len = args.decls_len != 0,
+ .has_backing_int = args.backing_int_ref != .none,
.known_non_opv = args.known_non_opv,
.known_comptime_only = args.known_comptime_only,
.name_strategy = gz.anon_name_strategy,
@@ -11605,6 +11762,45 @@ fn scanDecls(astgen: *AstGen, namespace: *Scope.Namespace, members: []const Ast.
error.OutOfMemory => return error.OutOfMemory,
}
}
+
+ var s = namespace.parent;
+ while (true) switch (s.tag) {
+ .local_val => {
+ const local_val = s.cast(Scope.LocalVal).?;
+ if (local_val.name == name_str_index) {
+ return astgen.failTokNotes(name_token, "redeclaration of {s} '{s}'", .{
+ @tagName(local_val.id_cat), token_bytes,
+ }, &[_]u32{
+ try astgen.errNoteTok(
+ local_val.token_src,
+ "previous declaration here",
+ .{},
+ ),
+ });
+ }
+ s = local_val.parent;
+ },
+ .local_ptr => {
+ const local_ptr = s.cast(Scope.LocalPtr).?;
+ if (local_ptr.name == name_str_index) {
+ return astgen.failTokNotes(name_token, "redeclaration of {s} '{s}'", .{
+ @tagName(local_ptr.id_cat), token_bytes,
+ }, &[_]u32{
+ try astgen.errNoteTok(
+ local_ptr.token_src,
+ "previous declaration here",
+ .{},
+ ),
+ });
+ }
+ s = local_ptr.parent;
+ },
+ .namespace => s = s.cast(Scope.Namespace).?.parent,
+ .gen_zir => s = s.cast(GenZir).?.parent,
+ .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent,
+ .defer_gen => s = s.cast(Scope.DeferGen).?.parent,
+ .top => break,
+ };
gop.value_ptr.* = member_node;
}
return decl_count;
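The new scope walk makes a container-level declaration that shadows an outer local a hard error; a sketch of code it now rejects:

    // error: redeclaration of local constant 'foo'
    // note: previous declaration here
    //   test {
    //       const foo = 1;
    //       const S = struct {
    //           const foo = 2;
    //       };
    //       _ = S;
    //       _ = foo;
    //   }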
@@ -11662,3 +11858,14 @@ fn countBodyLenAfterFixups(astgen: *AstGen, body: []const Zir.Inst.Index) u32 {
}
return @intCast(u32, count);
}
+
+fn emitDbgStmt(gz: *GenZir, line: u32, column: u32) !void {
+ if (gz.force_comptime) return;
+
+ _ = try gz.add(.{ .tag = .dbg_stmt, .data = .{
+ .dbg_stmt = .{
+ .line = line,
+ .column = column,
+ },
+ } });
+}
diff --git a/src/Autodoc.zig b/src/Autodoc.zig
index c2b0359c64..db681157ae 100644
--- a/src/Autodoc.zig
+++ b/src/Autodoc.zig
@@ -9,6 +9,7 @@ const Package = @import("Package.zig");
const Zir = @import("Zir.zig");
const Ref = Zir.Inst.Ref;
const log = std.log.scoped(.autodoc);
+const Docgen = @import("autodoc/render_source.zig");
module: *Module,
doc_location: Compilation.EmitLoc,
@@ -68,6 +69,8 @@ pub fn generateZirData(self: *Autodoc) !void {
}
}
+ log.debug("Ref map size: {}", .{Ref.typed_value_map.len});
+
const root_src_dir = self.module.main_pkg.root_src_directory;
const root_src_path = self.module.main_pkg.root_src_path;
const joined_src_path = try root_src_dir.join(self.arena, &.{root_src_path});
@@ -158,6 +161,9 @@ pub fn generateZirData(self: *Autodoc) !void {
.void_type => .{
.Void = .{ .name = tmpbuf.toOwnedSlice() },
},
+ .type_info_type => .{
+ .ComptimeExpr = .{ .name = tmpbuf.toOwnedSlice() },
+ },
.type_type => .{
.Type = .{ .name = tmpbuf.toOwnedSlice() },
},
@@ -189,10 +195,14 @@ pub fn generateZirData(self: *Autodoc) !void {
);
}
- var root_scope = Scope{ .parent = null, .enclosing_type = main_type_index };
+ var root_scope = Scope{
+ .parent = null,
+ .enclosing_type = main_type_index,
+ };
+
try self.ast_nodes.append(self.arena, .{ .name = "(root)" });
try self.files.put(self.arena, file, main_type_index);
- _ = try self.walkInstruction(file, &root_scope, Zir.main_struct_inst, false);
+ _ = try self.walkInstruction(file, &root_scope, 1, Zir.main_struct_inst, false);
if (self.ref_paths_pending_on_decls.count() > 0) {
@panic("some decl paths were never fully analized (pending on decls)");
@@ -242,6 +252,7 @@ pub fn generateZirData(self: *Autodoc) !void {
try d.handle.openDir(self.doc_location.basename, .{})
else
try self.module.zig_cache_artifact_directory.handle.openDir(self.doc_location.basename, .{});
+
{
const data_js_f = try output_dir.createFile("data.js", .{});
defer data_js_f.close();
@@ -266,6 +277,29 @@ pub fn generateZirData(self: *Autodoc) !void {
try buffer.flush();
}
+ {
+ output_dir.makeDir("src") catch |e| switch (e) {
+ error.PathAlreadyExists => {},
+ else => |err| return err,
+ };
+ const html_dir = try output_dir.openDir("src", .{});
+
+ var files_iterator = self.files.iterator();
+
+ while (files_iterator.next()) |entry| {
+ const new_html_path = entry.key_ptr.*.sub_file_path;
+
+ const html_file = try createFromPath(html_dir, new_html_path);
+ defer html_file.close();
+ var buffer = std.io.bufferedWriter(html_file.writer());
+
+ const out = buffer.writer();
+
+ try Docgen.genHtml(self.module.gpa, entry.key_ptr.*, out);
+ try buffer.flush();
+ }
+ }
+
// copy main.js, index.html
var docs_dir = try self.module.comp.zig_lib_directory.handle.openDir("docs", .{});
defer docs_dir.close();
@@ -273,6 +307,26 @@ pub fn generateZirData(self: *Autodoc) !void {
try docs_dir.copyFile("index.html", output_dir, "index.html", .{});
}
+fn createFromPath(base_dir: std.fs.Dir, path: []const u8) !std.fs.File {
+ var path_tokens = std.mem.tokenize(u8, path, std.fs.path.sep_str);
+ var dir = base_dir;
+ while (path_tokens.next()) |toc| {
+ if (path_tokens.peek() != null) {
+ dir.makeDir(toc) catch |e| switch (e) {
+ error.PathAlreadyExists => {},
+ else => |err| return err,
+ };
+ dir = try dir.openDir(toc, .{});
+ } else {
+ return dir.createFile(toc, .{}) catch |e| switch (e) {
+ error.PathAlreadyExists => try dir.openFile(toc, .{}),
+ else => |err| return err,
+ };
+ }
+ }
+ return error.EmptyPath;
+}
+
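+ Usage sketch for the helper above (hypothetical call site, as in the files loop earlier in this diff): intermediate directories are created on demand before the file itself:
+
+     // const html_file = try createFromPath(html_dir, "std/fs/file.zig");
+     // defer html_file.close();
+     // ... render HTML through a buffered writer, then flush.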
/// Represents a chain of scopes, used to resolve decl references to the
/// corresponding entry in `self.decls`.
const Scope = struct {
@@ -563,6 +617,7 @@ const DocData = struct {
type: usize, // index in `types`
this: usize, // index in `types`
declRef: usize, // index in `decls`
+ builtinField: enum { len, ptr },
fieldRef: FieldRef,
refPath: []Expr,
int: struct {
@@ -587,7 +642,7 @@ const DocData = struct {
sizeOf: usize, // index in `exprs`
bitSizeOf: usize, // index in `exprs`
enumToInt: usize, // index in `exprs`
- compileError: []const u8,
+ compileError: usize, // index in `exprs`
errorSets: usize,
string: []const u8, // direct value
sliceIndex: usize,
@@ -652,20 +707,26 @@ const DocData = struct {
var jsw = std.json.writeStream(w, 15);
try jsw.beginObject();
try jsw.objectField(@tagName(active_tag));
- inline for (comptime std.meta.fields(Expr)) |case| {
- if (@field(Expr, case.name) == active_tag) {
- switch (active_tag) {
- .int => {
- if (self.int.negated) try w.writeAll("-");
- try jsw.emitNumber(self.int.value);
- },
- .int_big => {
+ switch (self) {
+ .int => {
+ if (self.int.negated) try w.writeAll("-");
+ try jsw.emitNumber(self.int.value);
+ },
+ .int_big => {
- //@panic("TODO: json serialization of big ints!");
- //if (v.negated) try w.writeAll("-");
- //try jsw.emitNumber(v.value);
- },
- else => {
+ //@panic("TODO: json serialization of big ints!");
+ //if (v.negated) try w.writeAll("-");
+ //try jsw.emitNumber(v.value);
+ },
+ .builtinField => {
+ try jsw.emitString(@tagName(self.builtinField));
+ },
+ else => {
+ inline for (comptime std.meta.fields(Expr)) |case| {
+ // TODO: this is super ugly, fix once `inline else` is a thing
+ if (comptime std.mem.eql(u8, case.name, "builtinField"))
+ continue;
+ if (@field(Expr, case.name) == active_tag) {
try std.json.stringify(@field(self, case.name), opt, w);
jsw.state_index -= 1;
// TODO: we should not reach into the state of the
@@ -674,9 +735,9 @@ const DocData = struct {
// would be nice to have a proper integration
// between the json writer and the generic
// std.json.stringify implementation
- },
+ }
}
- }
+ },
}
try jsw.endObject();
}
@@ -712,6 +773,7 @@ fn walkInstruction(
self: *Autodoc,
file: *File,
parent_scope: *Scope,
+ parent_line: usize,
inst_index: usize,
need_type: bool, // true if the caller needs us to provide also a typeRef
) AutodocErrors!DocData.WalkResult {
@@ -794,12 +856,16 @@ fn walkInstruction(
const new_file = self.module.import_table.get(abs_root_src_path).?;
- var root_scope = Scope{ .parent = null, .enclosing_type = main_type_index };
+ var root_scope = Scope{
+ .parent = null,
+ .enclosing_type = main_type_index,
+ };
try self.ast_nodes.append(self.arena, .{ .name = "(root)" });
try self.files.put(self.arena, new_file, main_type_index);
return self.walkInstruction(
new_file,
&root_scope,
+ 1,
Zir.main_struct_inst,
false,
);
@@ -824,13 +890,14 @@ fn walkInstruction(
return self.walkInstruction(
new_file.file,
&new_scope,
+ 1,
Zir.main_struct_inst,
need_type,
);
},
.ret_node => {
const un_node = data[inst_index].un_node;
- return self.walkRef(file, parent_scope, un_node.operand, false);
+ return self.walkRef(file, parent_scope, parent_line, un_node.operand, false);
},
.ret_load => {
const un_node = data[inst_index].un_node;
@@ -861,7 +928,7 @@ fn walkInstruction(
}
if (result_ref) |rr| {
- return self.walkRef(file, parent_scope, rr, need_type);
+ return self.walkRef(file, parent_scope, parent_line, rr, need_type);
}
return DocData.WalkResult{
@@ -870,11 +937,11 @@ fn walkInstruction(
},
.closure_get => {
const inst_node = data[inst_index].inst_node;
- return try self.walkInstruction(file, parent_scope, inst_node.inst, need_type);
+ return try self.walkInstruction(file, parent_scope, parent_line, inst_node.inst, need_type);
},
.closure_capture => {
const un_tok = data[inst_index].un_tok;
- return try self.walkRef(file, parent_scope, un_tok.operand, need_type);
+ return try self.walkRef(file, parent_scope, parent_line, un_tok.operand, need_type);
},
.cmpxchg_strong, .cmpxchg_weak => {
const pl_node = data[inst_index].pl_node;
@@ -889,6 +956,7 @@ fn walkInstruction(
var ptr: DocData.WalkResult = try self.walkRef(
file,
parent_scope,
+ parent_line,
extra.data.ptr,
false,
);
@@ -898,6 +966,7 @@ fn walkInstruction(
var expected_value: DocData.WalkResult = try self.walkRef(
file,
parent_scope,
+ parent_line,
extra.data.expected_value,
false,
);
@@ -907,6 +976,7 @@ fn walkInstruction(
var new_value: DocData.WalkResult = try self.walkRef(
file,
parent_scope,
+ parent_line,
extra.data.new_value,
false,
);
@@ -916,6 +986,7 @@ fn walkInstruction(
var success_order: DocData.WalkResult = try self.walkRef(
file,
parent_scope,
+ parent_line,
extra.data.success_order,
false,
);
@@ -925,6 +996,7 @@ fn walkInstruction(
var failure_order: DocData.WalkResult = try self.walkRef(
file,
parent_scope,
+ parent_line,
extra.data.failure_order,
false,
);
@@ -978,17 +1050,16 @@ fn walkInstruction(
var operand: DocData.WalkResult = try self.walkRef(
file,
parent_scope,
+ parent_line,
un_node.operand,
false,
);
+ const operand_index = self.exprs.items.len;
+ try self.exprs.append(self.arena, operand.expr);
+
return DocData.WalkResult{
- .expr = .{
- .compileError = switch (operand.expr) {
- .string => |s| s,
- else => "TODO: non-string @compileError arguments",
- },
- },
+ .expr = .{ .compileError = operand_index },
};
},
.enum_literal => {
@@ -1034,12 +1105,14 @@ fn walkInstruction(
var lhs: DocData.WalkResult = try self.walkRef(
file,
parent_scope,
+ parent_line,
extra.data.lhs,
false,
);
var start: DocData.WalkResult = try self.walkRef(
file,
parent_scope,
+ parent_line,
extra.data.start,
false,
);
@@ -1065,18 +1138,21 @@ fn walkInstruction(
var lhs: DocData.WalkResult = try self.walkRef(
file,
parent_scope,
+ parent_line,
extra.data.lhs,
false,
);
var start: DocData.WalkResult = try self.walkRef(
file,
parent_scope,
+ parent_line,
extra.data.start,
false,
);
var end: DocData.WalkResult = try self.walkRef(
file,
parent_scope,
+ parent_line,
extra.data.end,
false,
);
@@ -1104,24 +1180,28 @@ fn walkInstruction(
var lhs: DocData.WalkResult = try self.walkRef(
file,
parent_scope,
+ parent_line,
extra.data.lhs,
false,
);
var start: DocData.WalkResult = try self.walkRef(
file,
parent_scope,
+ parent_line,
extra.data.start,
false,
);
var end: DocData.WalkResult = try self.walkRef(
file,
parent_scope,
+ parent_line,
extra.data.end,
false,
);
var sentinel: DocData.WalkResult = try self.walkRef(
file,
parent_scope,
+ parent_line,
extra.data.sentinel,
false,
);
@@ -1171,12 +1251,14 @@ fn walkInstruction(
var lhs: DocData.WalkResult = try self.walkRef(
file,
parent_scope,
+ parent_line,
extra.data.lhs,
false,
);
var rhs: DocData.WalkResult = try self.walkRef(
file,
parent_scope,
+ parent_line,
extra.data.rhs,
false,
);
@@ -1220,7 +1302,6 @@ fn walkInstruction(
.trunc,
.round,
.tag_name,
- .reify,
.type_name,
.frame_type,
.frame_size,
@@ -1238,7 +1319,7 @@ fn walkInstruction(
const un_node = data[inst_index].un_node;
const bin_index = self.exprs.items.len;
try self.exprs.append(self.arena, .{ .builtin = .{ .param = 0 } });
- const param = try self.walkRef(file, parent_scope, un_node.operand, false);
+ const param = try self.walkRef(file, parent_scope, parent_line, un_node.operand, false);
const param_index = self.exprs.items.len;
try self.exprs.append(self.arena, param.expr);
@@ -1287,12 +1368,14 @@ fn walkInstruction(
var lhs: DocData.WalkResult = try self.walkRef(
file,
parent_scope,
+ parent_line,
extra.data.lhs,
false,
);
var rhs: DocData.WalkResult = try self.walkRef(
file,
parent_scope,
+ parent_line,
extra.data.rhs,
false,
);
@@ -1315,12 +1398,14 @@ fn walkInstruction(
var lhs: DocData.WalkResult = try self.walkRef(
file,
parent_scope,
+ parent_line,
extra.data.lhs,
false,
);
var rhs: DocData.WalkResult = try self.walkRef(
file,
parent_scope,
+ parent_line,
extra.data.rhs,
false,
);
@@ -1343,12 +1428,14 @@ fn walkInstruction(
var lhs: DocData.WalkResult = try self.walkRef(
file,
parent_scope,
+ parent_line,
extra.data.lhs,
false,
);
var rhs: DocData.WalkResult = try self.walkRef(
file,
parent_scope,
+ parent_line,
extra.data.rhs,
false,
);
@@ -1368,7 +1455,7 @@ fn walkInstruction(
// var operand: DocData.WalkResult = try self.walkRef(
// file,
- // parent_scope,
+ // parent_scope, parent_line,
// un_node.operand,
// false,
// );
@@ -1377,7 +1464,7 @@ fn walkInstruction(
// },
.overflow_arithmetic_ptr => {
const un_node = data[inst_index].un_node;
- const elem_type_ref = try self.walkRef(file, parent_scope, un_node.operand, false);
+ const elem_type_ref = try self.walkRef(file, parent_scope, parent_line, un_node.operand, false);
const type_slot_index = self.types.items.len;
try self.types.append(self.arena, .{
.Pointer = .{
@@ -1402,6 +1489,7 @@ fn walkInstruction(
const elem_type_ref = try self.walkRef(
file,
parent_scope,
+ parent_line,
extra.data.elem_type,
false,
);
@@ -1411,7 +1499,7 @@ fn walkInstruction(
var sentinel: ?DocData.Expr = null;
if (ptr.flags.has_sentinel) {
const ref = @intToEnum(Zir.Inst.Ref, file.zir.extra[extra_index]);
- const ref_result = try self.walkRef(file, parent_scope, ref, false);
+ const ref_result = try self.walkRef(file, parent_scope, parent_line, ref, false);
sentinel = ref_result.expr;
extra_index += 1;
}
@@ -1419,21 +1507,21 @@ fn walkInstruction(
var @"align": ?DocData.Expr = null;
if (ptr.flags.has_align) {
const ref = @intToEnum(Zir.Inst.Ref, file.zir.extra[extra_index]);
- const ref_result = try self.walkRef(file, parent_scope, ref, false);
+ const ref_result = try self.walkRef(file, parent_scope, parent_line, ref, false);
@"align" = ref_result.expr;
extra_index += 1;
}
var address_space: ?DocData.Expr = null;
if (ptr.flags.has_addrspace) {
const ref = @intToEnum(Zir.Inst.Ref, file.zir.extra[extra_index]);
- const ref_result = try self.walkRef(file, parent_scope, ref, false);
+ const ref_result = try self.walkRef(file, parent_scope, parent_line, ref, false);
address_space = ref_result.expr;
extra_index += 1;
}
var bit_start: ?DocData.Expr = null;
if (ptr.flags.has_bit_range) {
const ref = @intToEnum(Zir.Inst.Ref, file.zir.extra[extra_index]);
- const ref_result = try self.walkRef(file, parent_scope, ref, false);
+ const ref_result = try self.walkRef(file, parent_scope, parent_line, ref, false);
address_space = ref_result.expr;
extra_index += 1;
}
@@ -1441,7 +1529,7 @@ fn walkInstruction(
var host_size: ?DocData.Expr = null;
if (ptr.flags.has_bit_range) {
const ref = @intToEnum(Zir.Inst.Ref, file.zir.extra[extra_index]);
- const ref_result = try self.walkRef(file, parent_scope, ref, false);
+ const ref_result = try self.walkRef(file, parent_scope, parent_line, ref, false);
host_size = ref_result.expr;
}
@@ -1471,8 +1559,8 @@ fn walkInstruction(
.array_type => {
const pl_node = data[inst_index].pl_node;
const bin = file.zir.extraData(Zir.Inst.Bin, pl_node.payload_index).data;
- const len = try self.walkRef(file, parent_scope, bin.lhs, false);
- const child = try self.walkRef(file, parent_scope, bin.rhs, false);
+ const len = try self.walkRef(file, parent_scope, parent_line, bin.lhs, false);
+ const child = try self.walkRef(file, parent_scope, parent_line, bin.rhs, false);
const type_slot_index = self.types.items.len;
try self.types.append(self.arena, .{
@@ -1490,9 +1578,9 @@ fn walkInstruction(
.array_type_sentinel => {
const pl_node = data[inst_index].pl_node;
const extra = file.zir.extraData(Zir.Inst.ArrayTypeSentinel, pl_node.payload_index);
- const len = try self.walkRef(file, parent_scope, extra.data.len, false);
- const sentinel = try self.walkRef(file, parent_scope, extra.data.sentinel, false);
- const elem_type = try self.walkRef(file, parent_scope, extra.data.elem_type, false);
+ const len = try self.walkRef(file, parent_scope, parent_line, extra.data.len, false);
+ const sentinel = try self.walkRef(file, parent_scope, parent_line, extra.data.sentinel, false);
+ const elem_type = try self.walkRef(file, parent_scope, parent_line, extra.data.elem_type, false);
const type_slot_index = self.types.items.len;
try self.types.append(self.arena, .{
@@ -1514,10 +1602,10 @@ fn walkInstruction(
const array_data = try self.arena.alloc(usize, operands.len - 1);
std.debug.assert(operands.len > 0);
- var array_type = try self.walkRef(file, parent_scope, operands[0], false);
+ var array_type = try self.walkRef(file, parent_scope, parent_line, operands[0], false);
for (operands[1..]) |op, idx| {
- const wr = try self.walkRef(file, parent_scope, op, false);
+ const wr = try self.walkRef(file, parent_scope, parent_line, op, false);
const expr_index = self.exprs.items.len;
try self.exprs.append(self.arena, wr.expr);
array_data[idx] = expr_index;
@@ -1535,7 +1623,7 @@ fn walkInstruction(
const array_data = try self.arena.alloc(usize, operands.len);
for (operands) |op, idx| {
- const wr = try self.walkRef(file, parent_scope, op, false);
+ const wr = try self.walkRef(file, parent_scope, parent_line, op, false);
const expr_index = self.exprs.items.len;
try self.exprs.append(self.arena, wr.expr);
array_data[idx] = expr_index;
@@ -1553,10 +1641,10 @@ fn walkInstruction(
const array_data = try self.arena.alloc(usize, operands.len - 1);
std.debug.assert(operands.len > 0);
- var array_type = try self.walkRef(file, parent_scope, operands[0], false);
+ var array_type = try self.walkRef(file, parent_scope, parent_line, operands[0], false);
for (operands[1..]) |op, idx| {
- const wr = try self.walkRef(file, parent_scope, op, false);
+ const wr = try self.walkRef(file, parent_scope, parent_line, op, false);
const expr_index = self.exprs.items.len;
try self.exprs.append(self.arena, wr.expr);
array_data[idx] = expr_index;
@@ -1585,7 +1673,7 @@ fn walkInstruction(
const array_data = try self.arena.alloc(usize, operands.len);
for (operands) |op, idx| {
- const wr = try self.walkRef(file, parent_scope, op, false);
+ const wr = try self.walkRef(file, parent_scope, parent_line, op, false);
const expr_index = self.exprs.items.len;
try self.exprs.append(self.arena, wr.expr);
array_data[idx] = expr_index;
@@ -1620,6 +1708,7 @@ fn walkInstruction(
var operand: DocData.WalkResult = try self.walkRef(
file,
parent_scope,
+ parent_line,
un_node.operand,
need_type,
);
@@ -1641,6 +1730,7 @@ fn walkInstruction(
const operand = try self.walkRef(
file,
parent_scope,
+ parent_line,
un_node.operand,
false,
);
@@ -1657,6 +1747,7 @@ fn walkInstruction(
const operand = try self.walkRef(
file,
parent_scope,
+ parent_line,
un_node.operand,
need_type,
);
@@ -1674,6 +1765,7 @@ fn walkInstruction(
const operand = try self.walkRef(
file,
parent_scope,
+ parent_line,
un_node.operand,
false,
);
@@ -1690,7 +1782,7 @@ fn walkInstruction(
const pl_node = data[inst_index].pl_node;
const extra = file.zir.extraData(Zir.Inst.SwitchBlock, pl_node.payload_index);
const cond_index = self.exprs.items.len;
- _ = try self.walkRef(file, parent_scope, extra.data.operand, false);
+ _ = try self.walkRef(file, parent_scope, parent_line, extra.data.operand, false);
const ast_index = self.ast_nodes.items.len;
const type_index = self.types.items.len - 1;
@@ -1718,6 +1810,7 @@ fn walkInstruction(
const operand = try self.walkRef(
file,
parent_scope,
+ parent_line,
un_node.operand,
need_type,
);
@@ -1743,6 +1836,7 @@ fn walkInstruction(
const operand = try self.walkRef(
file,
parent_scope,
+ parent_line,
un_node.operand,
need_type,
);
@@ -1762,6 +1856,7 @@ fn walkInstruction(
var operand: DocData.WalkResult = try self.walkRef(
file,
parent_scope,
+ parent_line,
data[body].@"break".operand,
false,
);
@@ -1780,6 +1875,7 @@ fn walkInstruction(
const operand = try self.walkRef(
file,
parent_scope,
+ parent_line,
un_node.operand,
need_type,
);
@@ -1798,6 +1894,7 @@ fn walkInstruction(
const dest_type_walk = try self.walkRef(
file,
parent_scope,
+ parent_line,
extra.data.dest_type,
false,
);
@@ -1805,6 +1902,7 @@ fn walkInstruction(
const operand = try self.walkRef(
file,
parent_scope,
+ parent_line,
extra.data.operand,
false,
);
@@ -1832,6 +1930,7 @@ fn walkInstruction(
const operand: DocData.WalkResult = try self.walkRef(
file,
parent_scope,
+ parent_line,
un_node.operand,
false,
);
@@ -1863,31 +1962,60 @@ fn walkInstruction(
const extra = file.zir.extraData(Zir.Inst.Field, pl_node.payload_index);
var path: std.ArrayListUnmanaged(DocData.Expr) = .{};
- var lhs = @enumToInt(extra.data.lhs) - Ref.typed_value_map.len; // underflow = need to handle Refs
-
try path.append(self.arena, .{
.string = file.zir.nullTerminatedString(extra.data.field_name_start),
});
+
// Put inside path the starting index of each decl name that
- // we encounter as we navigate through all the field_vals
- while (tags[lhs] == .field_val or
- tags[lhs] == .field_call_bind or
- tags[lhs] == .field_ptr or
- tags[lhs] == .field_type)
- {
- const lhs_extra = file.zir.extraData(
- Zir.Inst.Field,
- data[lhs].pl_node.payload_index,
- );
+ // we encounter as we navigate through all the field_*s
+ const lhs_ref = blk: {
+ var lhs_extra = extra;
+ while (true) {
+ if (@enumToInt(lhs_extra.data.lhs) < Ref.typed_value_map.len) {
+ break :blk lhs_extra.data.lhs;
+ }
- try path.append(self.arena, .{
- .string = file.zir.nullTerminatedString(lhs_extra.data.field_name_start),
- });
- lhs = @enumToInt(lhs_extra.data.lhs) - Ref.typed_value_map.len; // underflow = need to handle Refs
- }
+ const lhs = @enumToInt(lhs_extra.data.lhs) - Ref.typed_value_map.len;
+ if (tags[lhs] != .field_val and
+ tags[lhs] != .field_call_bind and
+ tags[lhs] != .field_ptr and
+ tags[lhs] != .field_type) break :blk lhs_extra.data.lhs;
+ lhs_extra = file.zir.extraData(
+ Zir.Inst.Field,
+ data[lhs].pl_node.payload_index,
+ );
+
+ try path.append(self.arena, .{
+ .string = file.zir.nullTerminatedString(lhs_extra.data.field_name_start),
+ });
+ }
+ };
+
+ // If the lhs is a `call` instruction, it means that we're inside
+ // a function call and we're referring to one of its arguments.
+ // We can't just blindly analyze the instruction or we will
+ // start recursing forever.
+ // TODO: add proper resolution of the container type for `calls`
+ // TODO: we end up testing whether lhs is an instruction twice
+ // (above and below this todo); maybe a cleaner solution would
+ // avoid that.
// TODO: double check that we really don't need type info here
- const wr = try self.walkInstruction(file, parent_scope, lhs, false);
+
+ const wr = blk: {
+ if (@enumToInt(lhs_ref) >= Ref.typed_value_map.len) {
+ const lhs_inst = @enumToInt(lhs_ref) - Ref.typed_value_map.len;
+ if (tags[lhs_inst] == .call) {
+ break :blk DocData.WalkResult{
+ .expr = .{
+ .comptimeExpr = 0,
+ },
+ };
+ }
+ }
+
+ break :blk try self.walkRef(file, parent_scope, parent_line, lhs_ref, false);
+ };
try path.append(self.arena, wr.expr);
// This way the data in `path` has the same ordering that the ref
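For context on the rewrite above: a chained access such as `a.b.c` lowers to
nested `field_*` ZIR instructions, each one's `lhs` pointing at the previous
link, so the loop collects field names from the outermost access inward and
then resolves whatever the innermost `lhs` turns out to be. Schematically (an
assumed illustration of the ZIR shape, not real compiler output):

    // For `a.b.c`:
    //   %3 = field_val("c", lhs = %2)   <- walk starts here; append "c"
    //   %2 = field_val("b", lhs = %1)   <- append "b"
    //   %1 = decl_ref("a")              <- not a field_*: walkRef resolves it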
@@ -1906,7 +2034,7 @@ fn walkInstruction(
// - (2) Paths can sometimes never resolve fully. This means that
// any value that depends on that will have to become a
// comptimeExpr.
- try self.tryResolveRefPath(file, lhs, path.items);
+ try self.tryResolveRefPath(file, inst_index, path.items);
return DocData.WalkResult{ .expr = .{ .refPath = path.items } };
},
.int_type => {
@@ -1937,6 +2065,7 @@ fn walkInstruction(
return self.walkRef(
file,
parent_scope,
+ parent_line,
getBlockInlineBreak(file.zir, inst_index),
need_type,
);
@@ -1969,6 +2098,7 @@ fn walkInstruction(
const wr = try self.walkRef(
file,
parent_scope,
+ parent_line,
field_extra.data.container_type,
false,
);
@@ -1979,6 +2109,7 @@ fn walkInstruction(
const value = try self.walkRef(
file,
parent_scope,
+ parent_line,
init_extra.data.init,
need_type,
);
@@ -1995,6 +2126,7 @@ fn walkInstruction(
var operand: DocData.WalkResult = try self.walkRef(
file,
parent_scope,
+ parent_line,
un_node.operand,
false,
);
@@ -2011,6 +2143,34 @@ fn walkInstruction(
);
return self.cteTodo(@tagName(tags[inst_index]));
},
+ .struct_init_anon => {
+ const pl_node = data[inst_index].pl_node;
+ const extra = file.zir.extraData(Zir.Inst.StructInitAnon, pl_node.payload_index);
+
+ const field_vals = try self.arena.alloc(
+ DocData.Expr.FieldVal,
+ extra.data.fields_len,
+ );
+
+ var idx = extra.end;
+ for (field_vals) |*fv| {
+ const init_extra = file.zir.extraData(Zir.Inst.StructInitAnon.Item, idx);
+ const field_name = file.zir.nullTerminatedString(init_extra.data.field_name);
+ const value = try self.walkRef(
+ file,
+ parent_scope,
+ parent_line,
+ init_extra.data.init,
+ need_type,
+ );
+ fv.* = .{ .name = field_name, .val = value };
+ idx = init_extra.end;
+ }
+
+ return DocData.WalkResult{
+ .expr = .{ .@"struct" = field_vals },
+ };
+ },
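The new `struct_init_anon` branch above lets autodoc render anonymous struct
literals, i.e. expressions like (a minimal illustration):

    const point = .{ .x = 10, .y = 20 };

Each field initializer is walked with `walkRef` and the results are collected
into the `.@"struct"` expression variant.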
.error_set_decl => {
const pl_node = data[inst_index].pl_node;
const extra = file.zir.extraData(Zir.Inst.ErrorSetDecl, pl_node.payload_index);
@@ -2075,18 +2235,23 @@ fn walkInstruction(
const pl_node = data[inst_index].pl_node;
const extra = file.zir.extraData(Zir.Inst.Call, pl_node.payload_index);
- const callee = try self.walkRef(file, parent_scope, extra.data.callee, need_type);
+ const callee = try self.walkRef(file, parent_scope, parent_line, extra.data.callee, need_type);
const args_len = extra.data.flags.args_len;
var args = try self.arena.alloc(DocData.Expr, args_len);
- const arg_refs = file.zir.refSlice(extra.end, args_len);
- for (arg_refs) |ref, idx| {
+ const body = file.zir.extra[extra.end..];
+
+ var i: usize = 0;
+ while (i < args_len) : (i += 1) {
+ const arg_end = file.zir.extra[extra.end + i];
+ const break_index = body[arg_end - 1];
+ const ref = data[break_index].@"break".operand;
// TODO: consider toggling need_type to true if we ever want
// to show discrepancies between the types of provided
// arguments and the types declared in the function
// signature for its parameters.
- const wr = try self.walkRef(file, parent_scope, ref, false);
- args[idx] = wr.expr;
+ const wr = try self.walkRef(file, parent_scope, parent_line, ref, false);
+ args[i] = wr.expr;
}
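A note on the argument extraction above: in this encoding, call arguments are
no longer a flat slice of refs. Each argument is a small ZIR body terminated
by a `break`, and the argument's value is that break's operand. Schematically
(an assumed illustration of the index arithmetic in the loop):

    // extra[extra.end + i] -> end offset of argument i's body
    // body[arg_end - 1]    -> the body's terminating `break` instruction
    // data[break].operand  -> the argument value that gets walked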
const cte_slot_index = self.comptime_exprs.items.len;
@@ -2116,6 +2281,7 @@ fn walkInstruction(
const result = self.analyzeFunction(
file,
parent_scope,
+ parent_line,
inst_index,
self_ast_node_index,
type_slot_index,
@@ -2131,6 +2297,7 @@ fn walkInstruction(
const result = self.analyzeFancyFunction(
file,
parent_scope,
+ parent_line,
inst_index,
self_ast_node_index,
type_slot_index,
@@ -2158,7 +2325,7 @@ fn walkInstruction(
var array_type: ?DocData.Expr = null;
for (args) |arg, idx| {
- const wr = try self.walkRef(file, parent_scope, arg, idx == 0);
+ const wr = try self.walkRef(file, parent_scope, parent_line, arg, idx == 0);
if (idx == 0) {
array_type = wr.typeRef;
}
@@ -2303,6 +2470,7 @@ fn walkInstruction(
extra_index = try self.walkDecls(
file,
&scope,
+ parent_line,
decls_first_index,
decls_len,
&decl_indexes,
@@ -2323,6 +2491,7 @@ fn walkInstruction(
try self.collectUnionFieldInfo(
file,
&scope,
+ parent_line,
fields_len,
&field_type_refs,
&field_name_indexes,
@@ -2423,6 +2592,7 @@ fn walkInstruction(
extra_index = try self.walkDecls(
file,
&scope,
+ parent_line,
decls_first_index,
decls_len,
&decl_indexes,
@@ -2532,6 +2702,17 @@ fn walkInstruction(
break :blk decls_len;
} else 0;
+ // TODO: Expose explicit backing integer types in some way.
+ if (small.has_backing_int) {
+ const backing_int_body_len = file.zir.extra[extra_index];
+ extra_index += 1; // backing_int_body_len
+ if (backing_int_body_len == 0) {
+ extra_index += 1; // backing_int_ref
+ } else {
+ extra_index += backing_int_body_len; // backing_int_body_inst
+ }
+ }
+
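The `has_backing_int` block above skips the ZIR that encodes an explicitly
specified backing integer for a packed struct, i.e. source like the following
(a minimal example of the `packed struct(T)` syntax this change is accounting
for):

    const Flags = packed struct(u16) {
        ready: bool,
        err: bool,
        _reserved: u14 = 0,
    };

When the backing integer is given by a simple reference, ZIR stores a single
`backing_int_ref`; otherwise a body of `backing_int_body_len` instructions
computes it, which is why the two branches advance `extra_index` by different
amounts.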
var decl_indexes: std.ArrayListUnmanaged(usize) = .{};
var priv_decl_indexes: std.ArrayListUnmanaged(usize) = .{};
@@ -2555,6 +2736,7 @@ fn walkInstruction(
extra_index = try self.walkDecls(
file,
&scope,
+ parent_line,
decls_first_index,
decls_len,
&decl_indexes,
@@ -2567,6 +2749,7 @@ fn walkInstruction(
try self.collectStructFieldInfo(
file,
&scope,
+ parent_line,
fields_len,
&field_type_refs,
&field_name_indexes,
@@ -2605,11 +2788,12 @@ fn walkInstruction(
},
.error_to_int,
.int_to_error,
+ .reify,
=> {
const extra = file.zir.extraData(Zir.Inst.UnNode, extended.operand).data;
const bin_index = self.exprs.items.len;
try self.exprs.append(self.arena, .{ .builtin = .{ .param = 0 } });
- const param = try self.walkRef(file, parent_scope, extra.operand, false);
+ const param = try self.walkRef(file, parent_scope, parent_line, extra.operand, false);
const param_index = self.exprs.items.len;
try self.exprs.append(self.arena, param.expr);
@@ -2637,6 +2821,7 @@ fn walkDecls(
self: *Autodoc,
file: *File,
scope: *Scope,
+ parent_line: usize,
decls_first_index: usize,
decls_len: u32,
decl_indexes: *std.ArrayListUnmanaged(usize),
@@ -2669,7 +2854,7 @@ fn walkDecls(
// const hash_u32s = file.zir.extra[extra_index..][0..4];
extra_index += 4;
- const line = file.zir.extra[extra_index];
+ const line = parent_line + file.zir.extra[extra_index];
extra_index += 1;
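This line is the crux of the `parent_line` threading throughout this file:
ZIR now stores each decl's line as a delta from its parent decl rather than
as an absolute line number. Illustrative arithmetic (hypothetical numbers):

    // enclosing container declared on line 40 -> parent_line == 40
    // decl stored with delta 2 in zir.extra   -> line == 40 + 2 == 42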
const decl_name_index = file.zir.extra[extra_index];
extra_index += 1;
@@ -2808,7 +2993,7 @@ fn walkDecls(
const ast_node_index = idx: {
const idx = self.ast_nodes.items.len;
try self.ast_nodes.append(self.arena, .{
- .file = 0,
+ .file = self.files.getIndex(file) orelse unreachable,
.line = line,
.col = 0,
.docs = doc_comment,
@@ -2820,7 +3005,7 @@ fn walkDecls(
const walk_result = if (is_test) // TODO: decide if tests should show up at all
DocData.WalkResult{ .expr = .{ .void = .{} } }
else
- try self.walkInstruction(file, scope, value_index, true);
+ try self.walkInstruction(file, scope, line, value_index, true);
if (is_pub) {
try decl_indexes.append(self.arena, decls_slot_index);
@@ -3013,6 +3198,10 @@ fn tryResolveRefPath(
.{ @tagName(self.types.items[t_index]), resolved_parent },
);
},
+ .ComptimeExpr => {
+ // Same as the comptimeExpr branch above
+ break :outer;
+ },
.Unanalyzed => {
// This decl path is pending completion
{
@@ -3035,6 +3224,20 @@ fn tryResolveRefPath(
return;
},
+ .Array => {
+ if (std.mem.eql(u8, child_string, "len")) {
+ path[i + 1] = .{
+ .builtinField = .len,
+ };
+ } else {
+ panicWithContext(
+ file,
+ inst_index,
+ "TODO: handle `{s}` in tryResolveDeclPath.type.Array\nInfo: {}",
+ .{ child_string, resolved_parent },
+ );
+ }
+ },
.Enum => |t_enum| {
for (t_enum.pubDecls) |d| {
// TODO: this could be improved a lot
@@ -3198,6 +3401,7 @@ fn analyzeFancyFunction(
self: *Autodoc,
file: *File,
scope: *Scope,
+ parent_line: usize,
inst_index: usize,
self_ast_node_index: usize,
type_slot_index: usize,
@@ -3262,7 +3466,7 @@ fn analyzeFancyFunction(
const break_index = file.zir.extra[extra.end..][extra.data.body_len - 1];
const break_operand = data[break_index].@"break".operand;
- const param_type_ref = try self.walkRef(file, scope, break_operand, false);
+ const param_type_ref = try self.walkRef(file, scope, parent_line, break_operand, false);
param_type_refs.appendAssumeCapacity(param_type_ref.expr);
},
@@ -3286,7 +3490,7 @@ fn analyzeFancyFunction(
if (extra.data.bits.has_align_ref) {
const align_ref = @intToEnum(Zir.Inst.Ref, file.zir.extra[extra_index]);
align_index = self.exprs.items.len;
- _ = try self.walkRef(file, scope, align_ref, false);
+ _ = try self.walkRef(file, scope, parent_line, align_ref, false);
extra_index += 1;
} else if (extra.data.bits.has_align_body) {
const align_body_len = file.zir.extra[extra_index];
@@ -3303,7 +3507,7 @@ fn analyzeFancyFunction(
if (extra.data.bits.has_addrspace_ref) {
const addrspace_ref = @intToEnum(Zir.Inst.Ref, file.zir.extra[extra_index]);
addrspace_index = self.exprs.items.len;
- _ = try self.walkRef(file, scope, addrspace_ref, false);
+ _ = try self.walkRef(file, scope, parent_line, addrspace_ref, false);
extra_index += 1;
} else if (extra.data.bits.has_addrspace_body) {
const addrspace_body_len = file.zir.extra[extra_index];
@@ -3320,7 +3524,7 @@ fn analyzeFancyFunction(
if (extra.data.bits.has_section_ref) {
const section_ref = @intToEnum(Zir.Inst.Ref, file.zir.extra[extra_index]);
section_index = self.exprs.items.len;
- _ = try self.walkRef(file, scope, section_ref, false);
+ _ = try self.walkRef(file, scope, parent_line, section_ref, false);
extra_index += 1;
} else if (extra.data.bits.has_section_body) {
const section_body_len = file.zir.extra[extra_index];
@@ -3337,7 +3541,7 @@ fn analyzeFancyFunction(
if (extra.data.bits.has_cc_ref) {
const cc_ref = @intToEnum(Zir.Inst.Ref, file.zir.extra[extra_index]);
cc_index = self.exprs.items.len;
- _ = try self.walkRef(file, scope, cc_ref, false);
+ _ = try self.walkRef(file, scope, parent_line, cc_ref, false);
extra_index += 1;
} else if (extra.data.bits.has_cc_body) {
const cc_body_len = file.zir.extra[extra_index];
@@ -3356,14 +3560,14 @@ fn analyzeFancyFunction(
.none => DocData.Expr{ .void = .{} },
else => blk: {
const ref = fn_info.ret_ty_ref;
- const wr = try self.walkRef(file, scope, ref, false);
+ const wr = try self.walkRef(file, scope, parent_line, ref, false);
break :blk wr.expr;
},
},
else => blk: {
const last_instr_index = fn_info.ret_ty_body[fn_info.ret_ty_body.len - 1];
const break_operand = data[last_instr_index].@"break".operand;
- const wr = try self.walkRef(file, scope, break_operand, false);
+ const wr = try self.walkRef(file, scope, parent_line, break_operand, false);
break :blk wr.expr;
},
};
@@ -3378,6 +3582,7 @@ fn analyzeFancyFunction(
break :blk try self.getGenericReturnType(
file,
scope,
+ parent_line,
fn_info.body[fn_info.body.len - 1],
);
} else {
@@ -3414,6 +3619,7 @@ fn analyzeFunction(
self: *Autodoc,
file: *File,
scope: *Scope,
+ parent_line: usize,
inst_index: usize,
self_ast_node_index: usize,
type_slot_index: usize,
@@ -3479,7 +3685,7 @@ fn analyzeFunction(
const break_index = file.zir.extra[extra.end..][extra.data.body_len - 1];
const break_operand = data[break_index].@"break".operand;
- const param_type_ref = try self.walkRef(file, scope, break_operand, false);
+ const param_type_ref = try self.walkRef(file, scope, parent_line, break_operand, false);
param_type_refs.appendAssumeCapacity(param_type_ref.expr);
},
@@ -3492,14 +3698,14 @@ fn analyzeFunction(
.none => DocData.Expr{ .void = .{} },
else => blk: {
const ref = fn_info.ret_ty_ref;
- const wr = try self.walkRef(file, scope, ref, false);
+ const wr = try self.walkRef(file, scope, parent_line, ref, false);
break :blk wr.expr;
},
},
else => blk: {
const last_instr_index = fn_info.ret_ty_body[fn_info.ret_ty_body.len - 1];
const break_operand = data[last_instr_index].@"break".operand;
- const wr = try self.walkRef(file, scope, break_operand, false);
+ const wr = try self.walkRef(file, scope, parent_line, break_operand, false);
break :blk wr.expr;
},
};
@@ -3514,6 +3720,7 @@ fn analyzeFunction(
break :blk try self.getGenericReturnType(
file,
scope,
+ parent_line,
fn_info.body[fn_info.body.len - 1],
);
} else {
@@ -3554,9 +3761,11 @@ fn getGenericReturnType(
self: *Autodoc,
file: *File,
scope: *Scope,
+ parent_line: usize, // function decl line
body_end: usize,
) !DocData.Expr {
- const wr = try self.walkInstruction(file, scope, body_end, false);
+ // TODO: compute the correct line offset
+ const wr = try self.walkInstruction(file, scope, parent_line, body_end, false);
return wr.expr;
}
@@ -3564,6 +3773,7 @@ fn collectUnionFieldInfo(
self: *Autodoc,
file: *File,
scope: *Scope,
+ parent_line: usize,
fields_len: usize,
field_type_refs: *std.ArrayListUnmanaged(DocData.Expr),
field_name_indexes: *std.ArrayListUnmanaged(usize),
@@ -3610,7 +3820,7 @@ fn collectUnionFieldInfo(
// type
{
- const walk_result = try self.walkRef(file, scope, field_type, false);
+ const walk_result = try self.walkRef(file, scope, parent_line, field_type, false);
try field_type_refs.append(self.arena, walk_result.expr);
}
@@ -3633,6 +3843,7 @@ fn collectStructFieldInfo(
self: *Autodoc,
file: *File,
scope: *Scope,
+ parent_line: usize,
fields_len: usize,
field_type_refs: *std.ArrayListUnmanaged(DocData.Expr),
field_name_indexes: *std.ArrayListUnmanaged(usize),
@@ -3706,7 +3917,7 @@ fn collectStructFieldInfo(
for (fields) |field| {
const type_expr = expr: {
if (field.type_ref != .none) {
- const walk_result = try self.walkRef(file, scope, field.type_ref, false);
+ const walk_result = try self.walkRef(file, scope, parent_line, field.type_ref, false);
break :expr walk_result.expr;
}
@@ -3716,7 +3927,7 @@ fn collectStructFieldInfo(
const break_inst = body[body.len - 1];
const operand = data[break_inst].@"break".operand;
- const walk_result = try self.walkRef(file, scope, operand, false);
+ const walk_result = try self.walkRef(file, scope, parent_line, operand, false);
break :expr walk_result.expr;
};
@@ -3746,6 +3957,7 @@ fn walkRef(
self: *Autodoc,
file: *File,
parent_scope: *Scope,
+ parent_line: usize,
ref: Ref,
need_type: bool, // true when the caller needs also a typeRef for the return value
) AutodocErrors!DocData.WalkResult {
@@ -3761,9 +3973,12 @@ fn walkRef(
} else if (enum_value < Ref.typed_value_map.len) {
switch (ref) {
else => {
- std.debug.panic("TODO: handle {s} in `walkRef`\n", .{
- @tagName(ref),
- });
+ panicWithContext(
+ file,
+ 0,
+ "TODO: handle {s} in walkRef",
+ .{@tagName(ref)},
+ );
},
.undef => {
return DocData.WalkResult{ .expr = .@"undefined" };
@@ -3854,7 +4069,7 @@ fn walkRef(
}
} else {
const zir_index = enum_value - Ref.typed_value_map.len;
- return self.walkInstruction(file, parent_scope, zir_index, need_type);
+ return self.walkInstruction(file, parent_scope, parent_line, zir_index, need_type);
}
}
@@ -3886,13 +4101,13 @@ fn cteTodo(self: *Autodoc, msg: []const u8) error{OutOfMemory}!DocData.WalkResul
}
fn writeFileTableToJson(map: std.AutoArrayHashMapUnmanaged(*File, usize), jsw: anytype) !void {
- try jsw.beginObject();
+ try jsw.beginArray();
var it = map.iterator();
while (it.next()) |entry| {
- try jsw.objectField(entry.key_ptr.*.sub_file_path);
- try jsw.emitNumber(entry.value_ptr.*);
+ try jsw.arrayElem();
+ try jsw.emitString(entry.key_ptr.*.sub_file_path);
}
- try jsw.endObject();
+ try jsw.endArray();
}
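With this change the file table is serialized as a JSON array instead of an
object, so a file's index is implied by its position rather than stored as a
value. Illustrative shape (hypothetical paths):

    // before: { "std/fs.zig": 0, "std/mem.zig": 1 }
    // after:  [ "std/fs.zig", "std/mem.zig" ]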
fn writePackageTableToJson(
diff --git a/src/BuiltinFn.zig b/src/BuiltinFn.zig
index 04cad19354..3a13dde1ab 100644
--- a/src/BuiltinFn.zig
+++ b/src/BuiltinFn.zig
@@ -250,14 +250,14 @@ pub const list = list: {
"@byteSwap",
.{
.tag = .byte_swap,
- .param_count = 2,
+ .param_count = 1,
},
},
.{
"@bitReverse",
.{
.tag = .bit_reverse,
- .param_count = 2,
+ .param_count = 1,
},
},
.{
@@ -301,7 +301,7 @@ pub const list = list: {
"@clz",
.{
.tag = .clz,
- .param_count = 2,
+ .param_count = 1,
},
},
.{
@@ -336,7 +336,7 @@ pub const list = list: {
"@ctz",
.{
.tag = .ctz,
- .param_count = 2,
+ .param_count = 1,
},
},
.{
@@ -614,7 +614,7 @@ pub const list = list: {
"@popCount",
.{
.tag = .pop_count,
- .param_count = 2,
+ .param_count = 1,
},
},
.{
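These `param_count` corrections track the language change that dropped the
explicit type parameter from the bit-manipulation builtins (previously e.g.
`@clz(u8, x)`). A small self-contained check of the single-argument forms,
assuming a compiler with this change:

    const std = @import("std");

    test "single-argument bit builtins" {
        const x: u8 = 0b0001_0000;
        try std.testing.expect(@clz(x) == 3); // leading zeros
        try std.testing.expect(@ctz(x) == 4); // trailing zeros
        try std.testing.expect(@popCount(x) == 1); // set bits
        try std.testing.expect(@byteSwap(x) == x); // one byte: no-op
        try std.testing.expect(@bitReverse(x) == 0b0000_1000);
    }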
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 17ffe356a3..353a2f1ca3 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -173,6 +173,7 @@ astgen_wait_group: WaitGroup = .{},
/// TODO: Remove this when Stage2 becomes the default compiler as it will already have this information.
export_symbol_names: std.ArrayListUnmanaged([]const u8) = .{},
+pub const default_stack_protector_buffer_size = 4;
pub const SemaError = Module.SemaError;
pub const CRTFile = struct {
@@ -810,7 +811,6 @@ pub const InitOptions = struct {
/// this flag would be set to disable this machinery to avoid false positives.
disable_lld_caching: bool = false,
cache_mode: CacheMode = .incremental,
- object_format: ?std.Target.ObjectFormat = null,
optimize_mode: std.builtin.Mode = .Debug,
keep_source_files_loaded: bool = false,
clang_argv: []const []const u8 = &[0][]const u8{},
@@ -838,6 +838,10 @@ pub const InitOptions = struct {
want_pie: ?bool = null,
want_sanitize_c: ?bool = null,
want_stack_check: ?bool = null,
+ /// null means default.
+ /// 0 means no stack protector.
+ /// Any other value means stack protection with that buffer size.
+ want_stack_protector: ?u32 = null,
want_red_zone: ?bool = null,
omit_frame_pointer: ?bool = null,
want_valgrind: ?bool = null,
@@ -1015,6 +1019,15 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
return error.ExportTableAndImportTableConflict;
}
+ // The `have_llvm` condition is here only because native backends cannot yet build compiler-rt.
+ // Once they are capable this condition could be removed. When removing this condition,
+ // also test the use case of `build-obj -fcompiler-rt` with the native backends
+ // and make sure the compiler-rt symbols are emitted.
+ const capable_of_building_compiler_rt = build_options.have_llvm;
+
+ const capable_of_building_zig_libc = build_options.have_llvm;
+ const capable_of_building_ssp = build_options.have_llvm;
+
const comp: *Compilation = comp: {
// For allocations that have the same lifetime as Compilation. This arena is used only during this
// initialization and then is freed in deinit().
@@ -1027,22 +1040,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
const comp = try arena.create(Compilation);
const root_name = try arena.dupeZ(u8, options.root_name);
- const ofmt = options.object_format orelse options.target.getObjectFormat();
-
- const use_stage1 = options.use_stage1 orelse blk: {
- // Even though we may have no Zig code to compile (depending on `options.main_pkg`),
- // we may need to use stage1 for building compiler-rt and other dependencies.
-
- if (build_options.omit_stage2)
- break :blk true;
- if (options.use_llvm) |use_llvm| {
- if (!use_llvm) {
- break :blk false;
- }
- }
-
- break :blk build_options.is_stage1;
- };
+ const use_stage1 = options.use_stage1 orelse false;
const cache_mode = if (use_stage1 and !options.disable_lld_caching)
CacheMode.whole
@@ -1068,7 +1066,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
break :blk true;
// If LLVM does not support the target, then we can't use it.
- if (!target_util.hasLlvmSupport(options.target, ofmt))
+ if (!target_util.hasLlvmSupport(options.target, options.target.ofmt))
break :blk false;
// Prefer LLVM for release builds.
@@ -1111,7 +1109,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
if (!build_options.have_llvm)
break :blk false;
- if (ofmt == .c)
+ if (options.target.ofmt == .c)
break :blk false;
if (options.want_lto) |lto| {
@@ -1167,9 +1165,6 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
break :blk false;
} else if (options.c_source_files.len == 0) {
break :blk false;
- } else if (options.target.os.tag == .windows and link_libcpp) {
- // https://github.com/ziglang/zig/issues/8531
- break :blk false;
} else if (options.target.cpu.arch.isRISCV()) {
// Clang and LLVM currently don't support RISC-V target-abi for LTO.
// Compiling with LTO may fail or produce undesired results.
@@ -1233,7 +1228,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
break :blk lm;
} else default_link_mode;
- const dll_export_fns = if (options.dll_export_fns) |explicit| explicit else is_dyn_lib or options.rdynamic;
+ const dll_export_fns = options.dll_export_fns orelse (is_dyn_lib or options.rdynamic);
const libc_dirs = try detectLibCIncludeDirs(
arena,
@@ -1288,11 +1283,36 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
const sanitize_c = options.want_sanitize_c orelse is_safe_mode;
- const stack_check: bool = b: {
- if (!target_util.supportsStackProbing(options.target))
- break :b false;
- break :b options.want_stack_check orelse is_safe_mode;
+ const stack_check: bool = options.want_stack_check orelse b: {
+ if (!target_util.supportsStackProbing(options.target)) break :b false;
+ break :b is_safe_mode;
};
+ if (stack_check and !target_util.supportsStackProbing(options.target))
+ return error.StackCheckUnsupportedByTarget;
+
+ const stack_protector: u32 = options.want_stack_protector orelse b: {
+ if (!target_util.supportsStackProtector(options.target)) break :b @as(u32, 0);
+
+ // This logic checks for linking libc because otherwise our start code
+ // is what sets up TLS (i.e. the fs/gs registers), and the stack
+ // protection code depends on the fs/gs registers being already set up.
+ // If we were able to annotate start code, or perhaps the entire std lib,
+ // as being exempt from stack protection checks, we could change this logic
+ // to supporting stack protection even when not linking libc.
+ // TODO file issue about this
+ if (!link_libc) break :b 0;
+ if (!capable_of_building_ssp) break :b 0;
+ if (is_safe_mode) break :b default_stack_protector_buffer_size;
+ break :b 0;
+ };
+ if (stack_protector != 0) {
+ if (!target_util.supportsStackProtector(options.target))
+ return error.StackProtectorUnsupportedByTarget;
+ if (!capable_of_building_ssp)
+ return error.StackProtectorUnsupportedByBackend;
+ if (!link_libc)
+ return error.StackProtectorUnavailableWithoutLibC;
+ }
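The default resolution above amounts to: stack protection defaults on (with
buffer size 4) only when the target supports it, libc is linked, the backend
can build libssp, and this is a safe build mode; an explicit
`want_stack_protector` bypasses the defaults but is still validated by the
error checks that follow. A toy model of just the default branch (not the
real API):

    fn defaultStackProtector(target_ok: bool, libc: bool, can_build_ssp: bool, safe: bool) u32 {
        if (!target_ok or !libc or !can_build_ssp or !safe) return 0;
        return 4; // default_stack_protector_buffer_size
    }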
const valgrind: bool = b: {
if (!target_util.hasValgrindSupport(options.target))
@@ -1370,13 +1390,14 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
cache.hash.add(options.target.os.getVersionRange());
cache.hash.add(options.is_native_os);
cache.hash.add(options.target.abi);
- cache.hash.add(ofmt);
+ cache.hash.add(options.target.ofmt);
cache.hash.add(pic);
cache.hash.add(pie);
cache.hash.add(lto);
cache.hash.add(unwind_tables);
cache.hash.add(tsan);
cache.hash.add(stack_check);
+ cache.hash.add(stack_protector);
cache.hash.add(red_zone);
cache.hash.add(omit_frame_pointer);
cache.hash.add(link_mode);
@@ -1678,7 +1699,6 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
.sysroot = sysroot,
.output_mode = options.output_mode,
.link_mode = link_mode,
- .object_format = ofmt,
.optimize_mode = options.optimize_mode,
.use_lld = use_lld,
.use_llvm = use_llvm,
@@ -1741,6 +1761,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
.valgrind = valgrind,
.tsan = tsan,
.stack_check = stack_check,
+ .stack_protector = stack_protector,
.red_zone = red_zone,
.omit_frame_pointer = omit_frame_pointer,
.single_threaded = single_threaded,
@@ -1769,6 +1790,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
.headerpad_size = options.headerpad_size,
.headerpad_max_install_names = options.headerpad_max_install_names,
.dead_strip_dylibs = options.dead_strip_dylibs,
+ .force_undefined_symbols = .{},
});
errdefer bin_file.destroy();
comp.* = .{
@@ -1822,6 +1844,8 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
};
errdefer comp.destroy();
+ const target = comp.getTarget();
+
// Add a `CObject` for each `c_source_files`.
try comp.c_object_table.ensureTotalCapacity(gpa, options.c_source_files.len);
for (options.c_source_files) |c_source_file| {
@@ -1837,9 +1861,9 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
const have_bin_emit = comp.bin_file.options.emit != null or comp.whole_bin_sub_path != null;
- if (have_bin_emit and !comp.bin_file.options.skip_linker_dependencies) {
- if (comp.getTarget().isDarwin()) {
- switch (comp.getTarget().abi) {
+ if (have_bin_emit and !comp.bin_file.options.skip_linker_dependencies and target.ofmt != .c) {
+ if (target.isDarwin()) {
+ switch (target.abi) {
.none,
.simulator,
.macabi,
@@ -1850,9 +1874,9 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
// If we need to build glibc for the target, add work items for it.
// We go through the work queue so that building can be done in parallel.
if (comp.wantBuildGLibCFromSource()) {
- if (!target_util.canBuildLibC(comp.getTarget())) return error.LibCUnavailable;
+ if (!target_util.canBuildLibC(target)) return error.LibCUnavailable;
- if (glibc.needsCrtiCrtn(comp.getTarget())) {
+ if (glibc.needsCrtiCrtn(target)) {
try comp.work_queue.write(&[_]Job{
.{ .glibc_crt_file = .crti_o },
.{ .glibc_crt_file = .crtn_o },
@@ -1865,10 +1889,10 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
});
}
if (comp.wantBuildMuslFromSource()) {
- if (!target_util.canBuildLibC(comp.getTarget())) return error.LibCUnavailable;
+ if (!target_util.canBuildLibC(target)) return error.LibCUnavailable;
try comp.work_queue.ensureUnusedCapacity(6);
- if (musl.needsCrtiCrtn(comp.getTarget())) {
+ if (musl.needsCrtiCrtn(target)) {
comp.work_queue.writeAssumeCapacity(&[_]Job{
.{ .musl_crt_file = .crti_o },
.{ .musl_crt_file = .crtn_o },
@@ -1885,7 +1909,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
});
}
if (comp.wantBuildWasiLibcFromSource()) {
- if (!target_util.canBuildLibC(comp.getTarget())) return error.LibCUnavailable;
+ if (!target_util.canBuildLibC(target)) return error.LibCUnavailable;
const wasi_emulated_libs = comp.bin_file.options.wasi_emulated_libs;
try comp.work_queue.ensureUnusedCapacity(wasi_emulated_libs.len + 2); // worst-case we need all components
@@ -1900,7 +1924,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
});
}
if (comp.wantBuildMinGWFromSource()) {
- if (!target_util.canBuildLibC(comp.getTarget())) return error.LibCUnavailable;
+ if (!target_util.canBuildLibC(target)) return error.LibCUnavailable;
const static_lib_jobs = [_]Job{
.{ .mingw_crt_file = .mingw32_lib },
@@ -1917,9 +1941,13 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
for (mingw.always_link_libs) |name| {
try comp.bin_file.options.system_libs.put(comp.gpa, name, .{});
}
+
+ // LLD might drop some symbols as unused during LTO and section GC, so
+ // we force-mark them for resolution here.
+ try comp.bin_file.options.force_undefined_symbols.put(comp.gpa, "_tls_index", {});
}
// Generate Windows import libs.
- if (comp.getTarget().os.tag == .windows) {
+ if (target.os.tag == .windows) {
const count = comp.bin_file.options.system_libs.count();
try comp.work_queue.ensureUnusedCapacity(count);
var i: usize = 0;
@@ -1938,15 +1966,6 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
try comp.work_queue.writeItem(.libtsan);
}
- // The `have_llvm` condition is here only because native backends cannot yet build compiler-rt.
- // Once they are capable this condition could be removed. When removing this condition,
- // also test the use case of `build-obj -fcompiler-rt` with the native backends
- // and make sure the compiler-rt symbols are emitted.
- const capable_of_building_compiler_rt = build_options.have_llvm;
-
- const capable_of_building_zig_libc = build_options.have_llvm;
- const capable_of_building_ssp = comp.bin_file.options.use_stage1;
-
if (comp.bin_file.options.include_compiler_rt and capable_of_building_compiler_rt) {
if (is_exe_or_dyn_lib) {
log.debug("queuing a job to build compiler_rt_lib", .{});
@@ -1960,8 +1979,11 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
}
}
if (needs_c_symbols) {
- // MinGW provides no libssp, use our own implementation.
- if (comp.getTarget().isMinGW() and capable_of_building_ssp) {
+ // Related: https://github.com/ziglang/zig/issues/7265.
+ if (comp.bin_file.options.stack_protector != 0 and
+ (!comp.bin_file.options.link_libc or
+ !target_util.libcProvidesStackProtector(target)))
+ {
try comp.work_queue.writeItem(.{ .libssp = {} });
}
@@ -2176,8 +2198,7 @@ pub fn update(comp: *Compilation) !void {
comp.c_object_work_queue.writeItemAssumeCapacity(key);
}
- const use_stage1 = build_options.omit_stage2 or
- (build_options.is_stage1 and comp.bin_file.options.use_stage1);
+ const use_stage1 = build_options.have_stage1 and comp.bin_file.options.use_stage1;
if (comp.bin_file.options.module) |module| {
module.compile_log_text.shrinkAndFree(module.gpa, 0);
module.generation += 1;
@@ -2353,8 +2374,7 @@ fn flush(comp: *Compilation, prog_node: *std.Progress.Node) !void {
};
comp.link_error_flags = comp.bin_file.errorFlags();
- const use_stage1 = build_options.omit_stage2 or
- (build_options.is_stage1 and comp.bin_file.options.use_stage1);
+ const use_stage1 = build_options.have_stage1 and comp.bin_file.options.use_stage1;
if (!use_stage1) {
if (comp.bin_file.options.module) |module| {
try link.File.C.flushEmitH(module);
@@ -2812,7 +2832,7 @@ pub fn performAllTheWork(
comp.work_queue_wait_group.reset();
defer comp.work_queue_wait_group.wait();
- const use_stage1 = build_options.is_stage1 and comp.bin_file.options.use_stage1;
+ const use_stage1 = build_options.have_stage1 and comp.bin_file.options.use_stage1;
{
const astgen_frame = tracy.namedFrame("astgen");
@@ -2915,9 +2935,6 @@ pub fn performAllTheWork(
fn processOneJob(comp: *Compilation, job: Job) !void {
switch (job) {
.codegen_decl => |decl_index| {
- if (build_options.omit_stage2)
- @panic("sadly stage2 is omitted from this build to save memory on the CI server");
-
const module = comp.bin_file.options.module.?;
const decl = module.declPtr(decl_index);
@@ -2952,9 +2969,6 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
}
},
.codegen_func => |func| {
- if (build_options.omit_stage2)
- @panic("sadly stage2 is omitted from this build to save memory on the CI server");
-
const named_frame = tracy.namedFrame("codegen_func");
defer named_frame.end();
@@ -2965,9 +2979,6 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
};
},
.emit_h_decl => |decl_index| {
- if (build_options.omit_stage2)
- @panic("sadly stage2 is omitted from this build to save memory on the CI server");
-
const module = comp.bin_file.options.module.?;
const decl = module.declPtr(decl_index);
@@ -3026,9 +3037,6 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
}
},
.analyze_decl => |decl_index| {
- if (build_options.omit_stage2)
- @panic("sadly stage2 is omitted from this build to save memory on the CI server");
-
const module = comp.bin_file.options.module.?;
module.ensureDeclAnalyzed(decl_index) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
@@ -3036,9 +3044,6 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
};
},
.update_embed_file => |embed_file| {
- if (build_options.omit_stage2)
- @panic("sadly stage2 is omitted from this build to save memory on the CI server");
-
const named_frame = tracy.namedFrame("update_embed_file");
defer named_frame.end();
@@ -3049,9 +3054,6 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
};
},
.update_line_number => |decl_index| {
- if (build_options.omit_stage2)
- @panic("sadly stage2 is omitted from this build to save memory on the CI server");
-
const named_frame = tracy.namedFrame("update_line_number");
defer named_frame.end();
@@ -3070,9 +3072,6 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
};
},
.analyze_pkg => |pkg| {
- if (build_options.omit_stage2)
- @panic("sadly stage2 is omitted from this build to save memory on the CI server");
-
const named_frame = tracy.namedFrame("analyze_pkg");
defer named_frame.end();
@@ -3418,7 +3417,7 @@ pub fn cImport(comp: *Compilation, c_src: []const u8) !CImportResult {
var man = comp.obtainCObjectCacheManifest();
defer man.deinit();
- const use_stage1 = build_options.is_stage1 and comp.bin_file.options.use_stage1;
+ const use_stage1 = build_options.have_stage1 and comp.bin_file.options.use_stage1;
man.hash.add(@as(u16, 0xb945)); // Random number to distinguish translate-c from compiling C objects
man.hash.add(use_stage1);
@@ -3735,7 +3734,8 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P
else
c_source_basename[0 .. c_source_basename.len - std.fs.path.extension(c_source_basename).len];
- const o_ext = comp.bin_file.options.object_format.fileExt(comp.bin_file.options.target.cpu.arch);
+ const target = comp.getTarget();
+ const o_ext = target.ofmt.fileExt(target.cpu.arch);
const digest = if (!comp.disable_c_depfile and try man.hit()) man.final() else blk: {
var argv = std.ArrayList([]const u8).init(comp.gpa);
defer argv.deinit();
@@ -3755,22 +3755,67 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P
};
const o_basename = try std.fmt.allocPrint(arena, "{s}{s}", .{ o_basename_noext, out_ext });
+ try argv.appendSlice(&[_][]const u8{
+ self_exe_path,
+ "clang",
+ c_object.src.src_path,
+ });
+
+ const ext = classifyFileExt(c_object.src.src_path);
+
+ // When all these flags are true, it means that the entire purpose of
+ // this compilation is to perform a single zig cc operation. This means
+ // that we could "tail call" clang by doing an execve, and any use of
+ // the caching system would actually be problematic since the user is
+ // presumably doing their own caching by using dep file flags.
+ if (std.process.can_execv and direct_o and
+ comp.disable_c_depfile and comp.clang_passthrough_mode)
+ {
+ try comp.addCCArgs(arena, &argv, ext, null);
+ try argv.appendSlice(c_object.src.extra_flags);
+
+ const out_obj_path = if (comp.bin_file.options.emit) |emit|
+ try emit.directory.join(arena, &.{emit.sub_path})
+ else
+ "/dev/null";
+
+ try argv.ensureUnusedCapacity(5);
+ switch (comp.clang_preprocessor_mode) {
+ .no => argv.appendSliceAssumeCapacity(&[_][]const u8{ "-c", "-o", out_obj_path }),
+ .yes => argv.appendSliceAssumeCapacity(&[_][]const u8{ "-E", "-o", out_obj_path }),
+ .stdout => argv.appendAssumeCapacity("-E"),
+ }
+
+ if (comp.emit_asm != null) {
+ argv.appendAssumeCapacity("-S");
+ } else if (comp.emit_llvm_ir != null) {
+ argv.appendSliceAssumeCapacity(&[_][]const u8{ "-emit-llvm", "-S" });
+ } else if (comp.emit_llvm_bc != null) {
+ argv.appendAssumeCapacity("-emit-llvm");
+ }
+
+ if (comp.verbose_cc) {
+ dump_argv(argv.items);
+ }
+
+ const err = std.process.execv(arena, argv.items);
+ fatal("unable to execv clang: {s}", .{@errorName(err)});
+ }
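Net effect of the relocated argv setup plus the new block above: when a
compilation exists solely to run a single clang job and the user manages dep
files themselves, zig replaces its own process with clang instead of spawning
a child and caching the result. Schematically:

    // can_execv and direct_o and disable_c_depfile and clang_passthrough_mode
    //   => finish building argv, then std.process.execv(arena, argv.items),
    //      which only returns on error (hence the fatal() call above).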
+
// We can't know the digest until we do the C compiler invocation,
// so we need a temporary filename.
const out_obj_path = try comp.tmpFilePath(arena, o_basename);
var zig_cache_tmp_dir = try comp.local_cache_directory.handle.makeOpenPath("tmp", .{});
defer zig_cache_tmp_dir.close();
- try argv.appendSlice(&[_][]const u8{ self_exe_path, "clang" });
-
- const ext = classifyFileExt(c_object.src.src_path);
const out_dep_path: ?[]const u8 = if (comp.disable_c_depfile or !ext.clangSupportsDepFile())
null
else
try std.fmt.allocPrint(arena, "{s}.d", .{out_obj_path});
try comp.addCCArgs(arena, &argv, ext, out_dep_path);
+ try argv.appendSlice(c_object.src.extra_flags);
- try argv.ensureUnusedCapacity(6 + c_object.src.extra_flags.len);
+ try argv.ensureUnusedCapacity(5);
switch (comp.clang_preprocessor_mode) {
.no => argv.appendSliceAssumeCapacity(&[_][]const u8{ "-c", "-o", out_obj_path }),
.yes => argv.appendSliceAssumeCapacity(&[_][]const u8{ "-E", "-o", out_obj_path }),
@@ -3785,8 +3830,6 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P
argv.appendAssumeCapacity("-emit-llvm");
}
}
- argv.appendAssumeCapacity(c_object.src.src_path);
- argv.appendSliceAssumeCapacity(c_object.src.extra_flags);
if (comp.verbose_cc) {
dump_argv(argv.items);
@@ -4087,10 +4130,10 @@ pub fn addCCArgs(
}
if (!comp.bin_file.options.strip) {
- try argv.append("-g");
- switch (comp.bin_file.options.object_format) {
+ switch (target.ofmt) {
.coff => try argv.append("-gcodeview"),
- else => {},
+ .elf, .macho => try argv.append("-gdwarf-4"),
+ else => try argv.append("-g"),
}
}
@@ -4120,6 +4163,17 @@ pub fn addCCArgs(
try argv.append("-fno-omit-frame-pointer");
}
+ const ssp_buf_size = comp.bin_file.options.stack_protector;
+ if (ssp_buf_size != 0) {
+ try argv.appendSlice(&[_][]const u8{
+ "-fstack-protector-strong",
+ "--param",
+ try std.fmt.allocPrint(arena, "ssp-buffer-size={d}", .{ssp_buf_size}),
+ });
+ } else {
+ try argv.append("-fno-stack-protector");
+ }
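Concretely, this centralized block replaces the per-optimize-mode copies
removed below; with a buffer size of 4 the C compile line gains (illustrative
fragment):

    // -fstack-protector-strong --param ssp-buffer-size=4
    // ...or -fno-stack-protector when stack_protector == 0.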
+
switch (comp.bin_file.options.optimize_mode) {
.Debug => {
// windows c runtime requires -D_DEBUG if using debug libraries
@@ -4128,27 +4182,12 @@ pub fn addCCArgs(
// to -O1. Besides potentially impairing debugging, -O1/-Og significantly
// increases compile times.
try argv.append("-O0");
-
- if (comp.bin_file.options.link_libc and target.os.tag != .wasi) {
- try argv.append("-fstack-protector-strong");
- try argv.append("--param");
- try argv.append("ssp-buffer-size=4");
- } else {
- try argv.append("-fno-stack-protector");
- }
},
.ReleaseSafe => {
// See the comment in the BuildModeFastRelease case for why we pass -O2 rather
// than -O3 here.
try argv.append("-O2");
- if (comp.bin_file.options.link_libc and target.os.tag != .wasi) {
- try argv.append("-D_FORTIFY_SOURCE=2");
- try argv.append("-fstack-protector-strong");
- try argv.append("--param");
- try argv.append("ssp-buffer-size=4");
- } else {
- try argv.append("-fno-stack-protector");
- }
+ try argv.append("-D_FORTIFY_SOURCE=2");
},
.ReleaseFast => {
try argv.append("-DNDEBUG");
@@ -4158,12 +4197,10 @@ pub fn addCCArgs(
// Zig code than it is for C code. Also, C programmers are used to their code
// running in -O2 and thus the -O3 path has been tested less.
try argv.append("-O2");
- try argv.append("-fno-stack-protector");
},
.ReleaseSmall => {
try argv.append("-DNDEBUG");
try argv.append("-Os");
- try argv.append("-fno-stack-protector");
},
}
@@ -4656,7 +4693,7 @@ fn wantBuildLibCFromSource(comp: Compilation) bool {
};
return comp.bin_file.options.link_libc and is_exe_or_dyn_lib and
comp.bin_file.options.libc_installation == null and
- comp.bin_file.options.object_format != .c;
+ comp.bin_file.options.target.ofmt != .c;
}
fn wantBuildGLibCFromSource(comp: Compilation) bool {
@@ -4684,7 +4721,7 @@ fn wantBuildLibUnwindFromSource(comp: *Compilation) bool {
.Exe => true,
};
return is_exe_or_dyn_lib and comp.bin_file.options.link_libunwind and
- comp.bin_file.options.object_format != .c;
+ comp.bin_file.options.target.ofmt != .c;
}
fn setAllocFailure(comp: *Compilation) void {
@@ -4738,12 +4775,12 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: Allocator) Alloca
const target = comp.getTarget();
const generic_arch_name = target.cpu.arch.genericName();
- const use_stage1 = build_options.is_stage1 and comp.bin_file.options.use_stage1;
+ const use_stage1 = build_options.have_stage1 and comp.bin_file.options.use_stage1;
const zig_backend: std.builtin.CompilerBackend = blk: {
if (use_stage1) break :blk .stage1;
if (build_options.have_llvm and comp.bin_file.options.use_llvm) break :blk .stage2_llvm;
- if (comp.bin_file.options.object_format == .c) break :blk .stage2_c;
+ if (target.ofmt == .c) break :blk .stage2_c;
break :blk switch (target.cpu.arch) {
.wasm32, .wasm64 => std.builtin.CompilerBackend.stage2_wasm,
.arm, .armeb, .thumb, .thumbeb => .stage2_arm,
@@ -4763,8 +4800,6 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: Allocator) Alloca
\\/// feature detection (i.e. with `@hasDecl` or `@hasField`) over version checks.
\\pub const zig_version = std.SemanticVersion.parse("{s}") catch unreachable;
\\pub const zig_backend = std.builtin.CompilerBackend.{};
- \\/// Temporary until self-hosted supports the `cpu.arch` value.
- \\pub const stage2_arch: std.Target.Cpu.Arch = .{};
\\
\\pub const output_mode = std.builtin.OutputMode.{};
\\pub const link_mode = std.builtin.LinkMode.{};
@@ -4779,7 +4814,6 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: Allocator) Alloca
, .{
build_options.version,
std.zig.fmtId(@tagName(zig_backend)),
- std.zig.fmtId(@tagName(target.cpu.arch)),
std.zig.fmtId(@tagName(comp.bin_file.options.output_mode)),
std.zig.fmtId(@tagName(comp.bin_file.options.link_mode)),
comp.bin_file.options.is_test,
@@ -4894,6 +4928,7 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: Allocator) Alloca
\\ .cpu = cpu,
\\ .os = os,
\\ .abi = abi,
+ \\ .ofmt = object_format,
\\}};
\\pub const object_format = std.Target.ObjectFormat.{};
\\pub const mode = std.builtin.Mode.{};
@@ -4908,7 +4943,7 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: Allocator) Alloca
\\pub const code_model = std.builtin.CodeModel.{};
\\
, .{
- std.zig.fmtId(@tagName(comp.bin_file.options.object_format)),
+ std.zig.fmtId(@tagName(target.ofmt)),
std.zig.fmtId(@tagName(comp.bin_file.options.optimize_mode)),
link_libc,
comp.bin_file.options.link_libcpp,
@@ -5027,9 +5062,10 @@ fn buildOutputFromZig(
.link_mode = .Static,
.function_sections = true,
.no_builtin = true,
- .use_stage1 = build_options.is_stage1 and comp.bin_file.options.use_stage1,
+ .use_stage1 = build_options.have_stage1 and comp.bin_file.options.use_stage1,
.want_sanitize_c = false,
.want_stack_check = false,
+ .want_stack_protector = 0,
.want_red_zone = comp.bin_file.options.red_zone,
.omit_frame_pointer = comp.bin_file.options.omit_frame_pointer,
.want_valgrind = false,
@@ -5310,6 +5346,7 @@ pub fn build_crt_file(
.optimize_mode = comp.compilerRtOptMode(),
.want_sanitize_c = false,
.want_stack_check = false,
+ .want_stack_protector = 0,
.want_red_zone = comp.bin_file.options.red_zone,
.omit_frame_pointer = comp.bin_file.options.omit_frame_pointer,
.want_valgrind = false,
diff --git a/src/Liveness.zig b/src/Liveness.zig
index 435075a411..5a4bd2265e 100644
--- a/src/Liveness.zig
+++ b/src/Liveness.zig
@@ -267,6 +267,7 @@ pub fn categorizeOperand(
.byte_swap,
.bit_reverse,
.splat,
+ .error_set_has_value,
=> {
const o = air_datas[inst].ty_op;
if (o.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
@@ -291,6 +292,7 @@ pub fn categorizeOperand(
.is_non_err_ptr,
.ptrtoint,
.bool_to_int,
+ .is_named_enum_value,
.tag_name,
.error_name,
.sqrt,
@@ -841,6 +843,7 @@ fn analyzeInst(
.byte_swap,
.bit_reverse,
.splat,
+ .error_set_has_value,
=> {
const o = inst_datas[inst].ty_op;
return trackOperands(a, new_set, inst, main_tomb, .{ o.operand, .none, .none });
@@ -858,6 +861,7 @@ fn analyzeInst(
.bool_to_int,
.ret,
.ret_load,
+ .is_named_enum_value,
.tag_name,
.error_name,
.sqrt,
diff --git a/src/Module.zig b/src/Module.zig
index 6122b417e4..a92849e127 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -84,7 +84,6 @@ string_literal_bytes: std.ArrayListUnmanaged(u8) = .{},
/// The set of all the generic function instantiations. This is used so that when a generic
/// function is called twice with the same comptime parameter arguments, both calls dispatch
/// to the same function.
-/// TODO: remove functions from this set when they are destroyed.
monomorphed_funcs: MonomorphedFuncsSet = .{},
/// The set of all comptime function calls that have been cached so that future calls
/// with the same parameters will get the same return value.
@@ -92,7 +91,6 @@ memoized_calls: MemoizedCallSet = .{},
/// Contains the values from `@setAlignStack`. A sparse table is used here
/// instead of a field of `Fn` because usage of `@setAlignStack` is rare, while
/// functions are many.
-/// TODO: remove functions from this set when they are destroyed.
align_stack_fns: std.AutoHashMapUnmanaged(*const Fn, SetAlignStack) = .{},
/// We optimize memory usage for a compilation with no compile errors by storing the
@@ -560,6 +558,10 @@ pub const Decl = struct {
gpa.destroy(extern_fn);
}
if (decl.getFunction()) |func| {
+ _ = mod.align_stack_fns.remove(func);
+ if (func.comptime_args != null) {
+ _ = mod.monomorphed_funcs.remove(func);
+ }
func.deinit(gpa);
gpa.destroy(func);
}
@@ -853,8 +855,6 @@ pub const EmitH = struct {
pub const ErrorSet = struct {
/// The Decl that corresponds to the error set itself.
owner_decl: Decl.Index,
- /// Offset from Decl node index, points to the error set AST node.
- node_offset: i32,
/// The string bytes are stored in the owner Decl arena.
/// These must be in sorted order. See sortNames.
names: NameMap,
@@ -866,7 +866,7 @@ pub const ErrorSet = struct {
return .{
.file_scope = owner_decl.getFileScope(),
.parent_decl_node = owner_decl.src_node,
- .lazy = LazySrcLoc.nodeOffset(self.node_offset),
+ .lazy = LazySrcLoc.nodeOffset(0),
};
}
@@ -893,12 +893,15 @@ pub const Struct = struct {
namespace: Namespace,
/// The Decl that corresponds to the struct itself.
owner_decl: Decl.Index,
- /// Offset from `owner_decl`, points to the struct AST node.
- node_offset: i32,
/// Index of the struct_decl ZIR instruction.
zir_index: Zir.Inst.Index,
layout: std.builtin.Type.ContainerLayout,
+ /// If the layout is not packed, this is the noreturn type.
+ /// If the layout is packed, this is the backing integer type of the packed struct.
+ /// Whether zig chooses this type or the user specifies it, it is stored here.
+ /// This will be set to the noreturn type until status is `have_layout`.
+ backing_int_ty: Type = Type.initTag(.noreturn),
status: enum {
none,
field_types_wip,
@@ -934,13 +937,41 @@ pub const Struct = struct {
/// If true then `default_val` is the comptime field value.
is_comptime: bool,
- /// Returns the field alignment, assuming the struct is not packed.
- pub fn normalAlignment(field: Field, target: Target) u32 {
- if (field.abi_align == 0) {
- return field.ty.abiAlignment(target);
- } else {
+ /// Returns the field alignment. If the struct is packed, returns 0.
+ pub fn alignment(
+ field: Field,
+ target: Target,
+ layout: std.builtin.Type.ContainerLayout,
+ ) u32 {
+ if (field.abi_align != 0) {
+ assert(layout != .Packed);
return field.abi_align;
}
+
+ switch (layout) {
+ .Packed => return 0,
+ .Auto => {
+ if (target.ofmt == .c) {
+ return alignmentExtern(field, target);
+ } else {
+ return field.ty.abiAlignment(target);
+ }
+ },
+ .Extern => return alignmentExtern(field, target),
+ }
+ }
+
+ pub fn alignmentExtern(field: Field, target: Target) u32 {
+ // This logic is duplicated in Type.abiAlignmentAdvanced.
+ const ty_abi_align = field.ty.abiAlignment(target);
+
+ if (field.ty.isAbiInt() and field.ty.intInfo(target).bits >= 128) {
+ // The C ABI requires 128 bit integer fields of structs
+ // to be 16-bytes aligned.
+ return @maximum(ty_abi_align, 16);
+ }
+
+ return ty_abi_align;
}
};
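A worked instance of the 128-bit rule above: on a target where `u128` has a
natural ABI alignment of 8, an extern struct field of that type is still
reported as 16-byte aligned to match the C ABI. A toy model of just that rule
(assumed inputs, not the real Type API):

    fn externIntFieldAlign(natural_align: u32, bits: u16) u32 {
        // Mirrors alignmentExtern: integer fields of >= 128 bits round up to 16.
        return if (bits >= 128) @maximum(natural_align, 16) else natural_align;
    }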
@@ -953,7 +984,7 @@ pub const Struct = struct {
return .{
.file_scope = owner_decl.getFileScope(),
.parent_decl_node = owner_decl.src_node,
- .lazy = LazySrcLoc.nodeOffset(s.node_offset),
+ .lazy = LazySrcLoc.nodeOffset(0),
};
}
@@ -968,7 +999,7 @@ pub const Struct = struct {
});
return s.srcLoc(mod);
};
- const node = owner_decl.relativeToNodeIndex(s.node_offset);
+ const node = owner_decl.relativeToNodeIndex(0);
const node_tags = tree.nodes.items(.tag);
switch (node_tags[node]) {
.container_decl,
@@ -1029,7 +1060,7 @@ pub const Struct = struct {
pub fn packedFieldBitOffset(s: Struct, target: Target, index: usize) u16 {
assert(s.layout == .Packed);
- assert(s.haveFieldTypes());
+ assert(s.haveLayout());
var bit_sum: u64 = 0;
for (s.fields.values()) |field, i| {
if (i == index) {
@@ -1037,19 +1068,7 @@ pub const Struct = struct {
}
bit_sum += field.ty.bitSize(target);
}
- return @intCast(u16, bit_sum);
- }
-
- pub fn packedIntegerBits(s: Struct, target: Target) u16 {
- return s.packedFieldBitOffset(target, s.fields.count());
- }
-
- pub fn packedIntegerType(s: Struct, target: Target, buf: *Type.Payload.Bits) Type {
- buf.* = .{
- .base = .{ .tag = .int_unsigned },
- .data = s.packedIntegerBits(target),
- };
- return Type.initPayload(&buf.base);
+ unreachable; // index out of bounds
}
};
@@ -1060,8 +1079,6 @@ pub const Struct = struct {
pub const EnumSimple = struct {
/// The Decl that corresponds to the enum itself.
owner_decl: Decl.Index,
- /// Offset from `owner_decl`, points to the enum decl AST node.
- node_offset: i32,
/// Set of field names in declaration order.
fields: NameMap,
@@ -1072,7 +1089,7 @@ pub const EnumSimple = struct {
return .{
.file_scope = owner_decl.getFileScope(),
.parent_decl_node = owner_decl.src_node,
- .lazy = LazySrcLoc.nodeOffset(self.node_offset),
+ .lazy = LazySrcLoc.nodeOffset(0),
};
}
};
@@ -1083,8 +1100,6 @@ pub const EnumSimple = struct {
pub const EnumNumbered = struct {
/// The Decl that corresponds to the enum itself.
owner_decl: Decl.Index,
- /// Offset from `owner_decl`, points to the enum decl AST node.
- node_offset: i32,
/// An integer type which is used for the numerical value of the enum.
/// Whether zig chooses this type or the user specifies it, it is stored here.
tag_ty: Type,
@@ -1103,7 +1118,7 @@ pub const EnumNumbered = struct {
return .{
.file_scope = owner_decl.getFileScope(),
.parent_decl_node = owner_decl.src_node,
- .lazy = LazySrcLoc.nodeOffset(self.node_offset),
+ .lazy = LazySrcLoc.nodeOffset(0),
};
}
};
@@ -1113,8 +1128,6 @@ pub const EnumNumbered = struct {
pub const EnumFull = struct {
/// The Decl that corresponds to the enum itself.
owner_decl: Decl.Index,
- /// Offset from `owner_decl`, points to the enum decl AST node.
- node_offset: i32,
/// An integer type which is used for the numerical value of the enum.
/// Whether zig chooses this type or the user specifies it, it is stored here.
tag_ty: Type,
@@ -1137,7 +1150,7 @@ pub const EnumFull = struct {
return .{
.file_scope = owner_decl.getFileScope(),
.parent_decl_node = owner_decl.src_node,
- .lazy = LazySrcLoc.nodeOffset(self.node_offset),
+ .lazy = LazySrcLoc.nodeOffset(0),
};
}
};
@@ -1155,8 +1168,6 @@ pub const Union = struct {
namespace: Namespace,
/// The Decl that corresponds to the union itself.
owner_decl: Decl.Index,
- /// Offset from `owner_decl`, points to the union decl AST node.
- node_offset: i32,
/// Index of the union_decl ZIR instruction.
zir_index: Zir.Inst.Index,
@@ -1203,7 +1214,7 @@ pub const Union = struct {
return .{
.file_scope = owner_decl.getFileScope(),
.parent_decl_node = owner_decl.src_node,
- .lazy = LazySrcLoc.nodeOffset(self.node_offset),
+ .lazy = LazySrcLoc.nodeOffset(0),
};
}
@@ -1218,7 +1229,7 @@ pub const Union = struct {
});
return u.srcLoc(mod);
};
- const node = owner_decl.relativeToNodeIndex(u.node_offset);
+ const node = owner_decl.relativeToNodeIndex(0);
const node_tags = tree.nodes.items(.tag);
var buf: [2]Ast.Node.Index = undefined;
switch (node_tags[node]) {
@@ -1357,18 +1368,20 @@ pub const Union = struct {
}
}
payload_align = @maximum(payload_align, 1);
- if (!have_tag or fields.len <= 1) return .{
- .abi_size = std.mem.alignForwardGeneric(u64, payload_size, payload_align),
- .abi_align = payload_align,
- .most_aligned_field = most_aligned_field,
- .most_aligned_field_size = most_aligned_field_size,
- .biggest_field = biggest_field,
- .payload_size = payload_size,
- .payload_align = payload_align,
- .tag_align = 0,
- .tag_size = 0,
- .padding = 0,
- };
+ if (!have_tag or !u.tag_ty.hasRuntimeBits()) {
+ return .{
+ .abi_size = std.mem.alignForwardGeneric(u64, payload_size, payload_align),
+ .abi_align = payload_align,
+ .most_aligned_field = most_aligned_field,
+ .most_aligned_field_size = most_aligned_field_size,
+ .biggest_field = biggest_field,
+ .payload_size = payload_size,
+ .payload_align = payload_align,
+ .tag_align = 0,
+ .tag_size = 0,
+ .padding = 0,
+ };
+ }
// Put the tag before or after the payload depending on which one's
// alignment is greater.
const tag_size = u.tag_ty.abiSize(target);
@@ -1410,8 +1423,6 @@ pub const Union = struct {
pub const Opaque = struct {
/// The Decl that corresponds to the opaque itself.
owner_decl: Decl.Index,
- /// Offset from `owner_decl`, points to the opaque decl AST node.
- node_offset: i32,
/// Represents the declarations inside this opaque.
namespace: Namespace,
@@ -1420,7 +1431,7 @@ pub const Opaque = struct {
return .{
.file_scope = owner_decl.getFileScope(),
.parent_decl_node = owner_decl.src_node,
- .lazy = LazySrcLoc.nodeOffset(self.node_offset),
+ .lazy = LazySrcLoc.nodeOffset(0),
};
}
@@ -1464,25 +1475,14 @@ pub const Fn = struct {
/// These never have .generic_poison for the Type
/// because the Type is needed to pass to `Type.eql` and for inserting comptime arguments
/// into the inst_map when analyzing the body of a generic function instantiation.
- /// Instead, the is_anytype knowledge is communicated via `anytype_args`.
+ /// Instead, the is_anytype knowledge is communicated via `isAnytypeParam`.
comptime_args: ?[*]TypedValue,
- /// When comptime_args is null, this is undefined. Otherwise, this flags each
- /// parameter and tells whether it is anytype.
- /// TODO apply the same enhancement for param_names below to this field.
- anytype_args: [*]bool,
-
- /// Prefer to use `getParamName` to access this because of the future improvement
- /// we want to do mentioned in the TODO below.
- /// Stored in gpa.
- /// TODO: change param ZIR instructions to be embedded inside the function
- /// ZIR instruction instead of before it, so that `zir_body_inst` can be used to
- /// determine param names rather than redundantly storing them here.
- param_names: []const [:0]const u8,
/// Precomputed hash for monomorphed_funcs.
/// This is important because it may be accessed when resizing monomorphed_funcs
/// while this Fn has already been added to the set, but does not have the
/// owner_decl, comptime_args, or other fields populated yet.
+ /// This field is undefined if comptime_args == null.
hash: u64,
/// Relative to owner Decl.
@@ -1590,18 +1590,43 @@ pub const Fn = struct {
gpa.destroy(node);
it = next;
}
-
- for (func.param_names) |param_name| {
- gpa.free(param_name);
- }
- gpa.free(func.param_names);
}
- pub fn getParamName(func: Fn, index: u32) [:0]const u8 {
- // TODO rework ZIR of parameters so that this function looks up
- // param names in ZIR instead of redundantly saving them into Fn.
- // const zir = func.owner_decl.getFileScope().zir;
- return func.param_names[index];
+ pub fn isAnytypeParam(func: Fn, mod: *Module, index: u32) bool {
+ const file = mod.declPtr(func.owner_decl).getFileScope();
+
+ const tags = file.zir.instructions.items(.tag);
+
+ const param_body = file.zir.getParamBody(func.zir_body_inst);
+ const param = param_body[index];
+
+ return switch (tags[param]) {
+ .param, .param_comptime => false,
+ .param_anytype, .param_anytype_comptime => true,
+ else => unreachable,
+ };
+ }
+
+ pub fn getParamName(func: Fn, mod: *Module, index: u32) [:0]const u8 {
+ const file = mod.declPtr(func.owner_decl).getFileScope();
+
+ const tags = file.zir.instructions.items(.tag);
+ const data = file.zir.instructions.items(.data);
+
+ const param_body = file.zir.getParamBody(func.zir_body_inst);
+ const param = param_body[index];
+
+ return switch (tags[param]) {
+ .param, .param_comptime => blk: {
+ const extra = file.zir.extraData(Zir.Inst.Param, data[param].pl_tok.payload_index);
+ break :blk file.zir.nullTerminatedString(extra.data.name);
+ },
+ .param_anytype, .param_anytype_comptime => blk: {
+ const param_data = data[param].str_tok;
+ break :blk param_data.get(file.zir);
+ },
+ else => unreachable,
+ };
}
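// Hypothetical usage sketch (not part of this change): both helpers now take
// the Module so they can reach the owning file's ZIR, replacing the
// param_names and anytype_args arrays that Fn previously carried.
fn debugDumpParams(func: Fn, mod: *Module, param_count: u32) void {
    var i: u32 = 0;
    while (i < param_count) : (i += 1) {
        std.debug.print("param {d}: '{s}' anytype={}\n", .{
            i, func.getParamName(mod, i), func.isAnytypeParam(mod, i),
        });
    }
}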
pub fn hasInferredErrorSet(func: Fn, mod: *Module) bool {
@@ -4102,6 +4127,12 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
// The exports this Decl performs will be re-discovered, so we remove them here
// prior to re-analysis.
mod.deleteDeclExports(decl_index);
+
+ // Similarly, `@setAlignStack` invocations will be re-discovered.
+ if (decl.getFunction()) |func| {
+ _ = mod.align_stack_fns.remove(func);
+ }
+
// Dependencies will be re-discovered, so we remove them here prior to re-analysis.
for (decl.dependencies.keys()) |dep_index| {
const dep = mod.declPtr(dep_index);
@@ -4324,7 +4355,6 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
struct_obj.* = .{
.owner_decl = undefined, // set below
.fields = .{},
- .node_offset = 0, // it's the struct for the root file
.zir_index = undefined, // set below
.layout = .Auto,
.status = .none,
@@ -6047,17 +6077,17 @@ pub fn paramSrc(
else => unreachable,
};
var it = full.iterate(tree);
- while (true) {
- if (it.param_i == param_i) {
- const param = it.next().?;
+ var i: usize = 0;
+ while (it.next()) |param| : (i += 1) {
+ if (i == param_i) {
if (param.anytype_ellipsis3) |some| {
const main_token = tree.nodes.items(.main_token)[decl.src_node];
return .{ .token_offset_param = @bitCast(i32, some) - @bitCast(i32, main_token) };
}
return .{ .node_offset_param = decl.nodeIndexToRelative(param.type_expr) };
}
- _ = it.next();
}
+ unreachable;
}
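// The rewrite above is the standard optional-returning iterator idiom with a
// continue expression; a standalone model of the same shape:
test "while-iterator with counter" {
    const testing = @import("std").testing;
    var it = @import("std").mem.tokenize(u8, "a b c", " ");
    var i: usize = 0;
    while (it.next()) |tok| : (i += 1) {
        _ = tok;
    }
    try testing.expectEqual(@as(usize, 3), i);
}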
pub fn argSrc(
@@ -6504,3 +6534,7 @@ pub fn addGlobalAssembly(mod: *Module, decl_index: Decl.Index, source: []const u
mod.global_assembly.putAssumeCapacityNoClobber(decl_index, duped_source);
}
+
+pub fn wantDllExports(mod: Module) bool {
+ return mod.comp.bin_file.options.dll_export_fns and mod.getTarget().os.tag == .windows;
+}
diff --git a/src/Sema.zig b/src/Sema.zig
index 2721ed5179..f884684d73 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -76,8 +76,14 @@ types_to_resolve: std.ArrayListUnmanaged(Air.Inst.Ref) = .{},
post_hoc_blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, *LabeledBlock) = .{},
/// Populated with the last compile error created.
err: ?*Module.ErrorMsg = null,
+/// True when analyzing a generic instantiation. Used to suppress some errors.
+is_generic_instantiation: bool = false,
+/// Set to true when analyzing a func type instruction so that nested generic
+/// function types will emit generic poison instead of a partial type.
+no_partial_func_ty: bool = false,
const std = @import("std");
+const math = std.math;
const mem = std.mem;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
@@ -772,7 +778,6 @@ fn analyzeBodyInner(
.optional_payload_unsafe => try sema.zirOptionalPayload(block, inst, false),
.optional_payload_unsafe_ptr => try sema.zirOptionalPayloadPtr(block, inst, false),
.optional_type => try sema.zirOptionalType(block, inst),
- .param_type => try sema.zirParamType(block, inst),
.ptr_type => try sema.zirPtrType(block, inst),
.overflow_arithmetic_ptr => try sema.zirOverflowArithmeticPtr(block, inst),
.ref => try sema.zirRef(block, inst),
@@ -816,7 +821,6 @@ fn analyzeBodyInner(
.embed_file => try sema.zirEmbedFile(block, inst),
.error_name => try sema.zirErrorName(block, inst),
.tag_name => try sema.zirTagName(block, inst),
- .reify => try sema.zirReify(block, inst),
.type_name => try sema.zirTypeName(block, inst),
.frame_type => try sema.zirFrameType(block, inst),
.frame_size => try sema.zirFrameSize(block, inst),
@@ -876,9 +880,6 @@ fn analyzeBodyInner(
.add => try sema.zirArithmetic(block, inst, .add),
.addwrap => try sema.zirArithmetic(block, inst, .addwrap),
.add_sat => try sema.zirArithmetic(block, inst, .add_sat),
- .mod_rem => try sema.zirArithmetic(block, inst, .mod_rem),
- .mod => try sema.zirArithmetic(block, inst, .mod),
- .rem => try sema.zirArithmetic(block, inst, .rem),
.mul => try sema.zirArithmetic(block, inst, .mul),
.mulwrap => try sema.zirArithmetic(block, inst, .mulwrap),
.mul_sat => try sema.zirArithmetic(block, inst, .mul_sat),
@@ -891,6 +892,10 @@ fn analyzeBodyInner(
.div_floor => try sema.zirDivFloor(block, inst),
.div_trunc => try sema.zirDivTrunc(block, inst),
+ .mod_rem => try sema.zirModRem(block, inst),
+ .mod => try sema.zirMod(block, inst),
+ .rem => try sema.zirRem(block, inst),
+
.maximum => try sema.zirMinMax(block, inst, .max),
.minimum => try sema.zirMinMax(block, inst, .min),
@@ -950,6 +955,7 @@ fn analyzeBodyInner(
.select => try sema.zirSelect( block, extended),
.error_to_int => try sema.zirErrorToInt( block, extended),
.int_to_error => try sema.zirIntToError( block, extended),
+ .reify => try sema.zirReify( block, extended, inst),
// zig fmt: on
.fence => {
try sema.zirFence(block, extended);
@@ -1494,7 +1500,8 @@ pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) !Air.Inst.Ref {
// Finally, the last section of indexes refers to the map of ZIR=>AIR.
const inst = sema.inst_map.get(@intCast(u32, i)).?;
- if (sema.typeOf(inst).tag() == .generic_poison) return error.GenericPoison;
+ const ty = sema.typeOf(inst);
+ if (ty.tag() == .generic_poison) return error.GenericPoison;
return inst;
}
@@ -1577,8 +1584,7 @@ pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize)
// st.index = 0;
const index_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, "index", src, true);
- const zero = try sema.addConstant(Type.usize, Value.zero);
- try sema.storePtr2(&err_trace_block, src, index_field_ptr, src, zero, src, .store);
+ try sema.storePtr2(&err_trace_block, src, index_field_ptr, src, .zero_usize, src, .store);
// @errorReturnTrace() = &st;
_ = try err_trace_block.addUnOp(.set_err_return_trace, st_ptr);
@@ -1695,7 +1701,10 @@ fn resolveMaybeUndefValIntable(
.elem_ptr => check = check.castTag(.elem_ptr).?.data.array_ptr,
.eu_payload_ptr, .opt_payload_ptr => check = check.cast(Value.Payload.PayloadPtr).?.data.container_ptr,
.generic_poison => return error.GenericPoison,
- else => return val,
+ else => {
+ try sema.resolveLazyValue(block, src, val);
+ return val;
+ },
};
}
@@ -1818,10 +1827,21 @@ fn failWithInvalidComptimeFieldStore(sema: *Sema, block: *Block, init_src: LazyS
const tree = try sema.getAstTree(block);
const decl = sema.mod.declPtr(decl_index);
- const field_src = enumFieldSrcLoc(decl, tree.*, container_ty.getNodeOffset(), field_index);
+ const field_src = enumFieldSrcLoc(decl, tree.*, 0, field_index);
const default_value_src: LazySrcLoc = .{ .node_offset_field_default = field_src.node_offset.x };
- try sema.errNote(block, default_value_src, msg, "default value set here", .{});
+ try sema.mod.errNoteNonLazy(default_value_src.toSrcLoc(decl), msg, "default value set here", .{});
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
+}
+
+fn failWithUseOfAsync(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError {
+ const msg = msg: {
+ const msg = try sema.errMsg(block, src, "async has not been implemented in the self-hosted compiler yet", .{});
+ errdefer msg.destroy(sema.gpa);
+
+ try sema.errNote(block, src, msg, "to use async, enable the stage1 compiler with either '-fstage1' or by setting '.use_stage1 = true' in your 'build.zig' script", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
@@ -1855,7 +1875,7 @@ fn addFieldErrNote(
const decl_index = container_ty.getOwnerDecl();
const decl = mod.declPtr(decl_index);
const tree = try sema.getAstTree(block);
- const field_src = enumFieldSrcLoc(decl, tree.*, container_ty.getNodeOffset(), field_index);
+ const field_src = enumFieldSrcLoc(decl, tree.*, 0, field_index);
try mod.errNoteNonLazy(field_src.toSrcLoc(decl), parent, format, args);
}
@@ -1895,8 +1915,6 @@ fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError {
}
const mod = sema.mod;
- sema.err = err_msg;
-
{
errdefer err_msg.destroy(mod.gpa);
if (err_msg.src_loc.lazy == .unneeded) {
@@ -1914,8 +1932,10 @@ fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError {
const gop = mod.failed_decls.getOrPutAssumeCapacity(sema.owner_decl_index);
if (gop.found_existing) {
// If there are multiple errors for the same Decl, prefer the first one added.
+ sema.err = null;
err_msg.destroy(mod.gpa);
} else {
+ sema.err = err_msg;
gop.value_ptr.* = err_msg;
}
return error.AnalysisFail;
@@ -2228,6 +2248,16 @@ pub fn analyzeStructDecl(
break :blk decls_len;
} else 0;
+ if (small.has_backing_int) {
+ const backing_int_body_len = sema.code.extra[extra_index];
+ extra_index += 1; // backing_int_body_len
+ if (backing_int_body_len == 0) {
+ extra_index += 1; // backing_int_ref
+ } else {
+ extra_index += backing_int_body_len; // backing_int_body_inst
+ }
+ }
+
_ = try sema.mod.scanNamespace(&struct_obj.namespace, extra_index, decls_len, new_decl);
}
@@ -2251,7 +2281,7 @@ fn zirStructDecl(
const struct_obj = try new_decl_arena_allocator.create(Module.Struct);
const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj);
const struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty);
- const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, .{
+ const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = struct_val,
}, small.name_strategy, "struct", inst);
@@ -2261,7 +2291,6 @@ fn zirStructDecl(
struct_obj.* = .{
.owner_decl = new_decl_index,
.fields = .{},
- .node_offset = src.node_offset.x,
.zir_index = inst,
.layout = small.layout,
.status = .none,
@@ -2283,6 +2312,7 @@ fn zirStructDecl(
fn createAnonymousDeclTypeNamed(
sema: *Sema,
block: *Block,
+ src: LazySrcLoc,
typed_value: TypedValue,
name_strategy: Zir.Inst.NameStrategy,
anon_prefix: []const u8,
@@ -2292,7 +2322,8 @@ fn createAnonymousDeclTypeNamed(
const namespace = block.namespace;
const src_scope = block.wip_capture_scope;
const src_decl = mod.declPtr(block.src_decl);
- const new_decl_index = try mod.allocateNewDecl(namespace, src_decl.src_node, src_scope);
+ const src_node = src_decl.relativeToNodeIndex(src.node_offset.x);
+ const new_decl_index = try mod.allocateNewDecl(namespace, src_node, src_scope);
errdefer mod.destroyDecl(new_decl_index);
switch (name_strategy) {
@@ -2367,7 +2398,7 @@ fn createAnonymousDeclTypeNamed(
},
else => {},
};
- return sema.createAnonymousDeclTypeNamed(block, typed_value, .anon, anon_prefix, null);
+ return sema.createAnonymousDeclTypeNamed(block, src, typed_value, .anon, anon_prefix, null);
},
}
}
@@ -2431,7 +2462,7 @@ fn zirEnumDecl(
};
const enum_ty = Type.initPayload(&enum_ty_payload.base);
const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty);
- const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, .{
+ const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = enum_val,
}, small.name_strategy, "enum", inst);
@@ -2445,7 +2476,6 @@ fn zirEnumDecl(
.tag_ty_inferred = true,
.fields = .{},
.values = .{},
- .node_offset = src.node_offset.x,
.namespace = .{
.parent = block.namespace,
.ty = enum_ty,
@@ -2467,18 +2497,6 @@ fn zirEnumDecl(
extra_index = try mod.scanNamespace(&enum_obj.namespace, extra_index, decls_len, new_decl);
const body = sema.code.extra[extra_index..][0..body_len];
- if (fields_len == 0) {
- assert(body.len == 0);
- if (tag_type_ref != .none) {
- const ty = try sema.resolveType(block, tag_ty_src, tag_type_ref);
- if (ty.zigTypeTag() != .Int and ty.zigTypeTag() != .ComptimeInt) {
- return sema.fail(block, tag_ty_src, "expected integer tag type, found '{}'", .{ty.fmt(sema.mod)});
- }
- enum_obj.tag_ty = try ty.copy(new_decl_arena_allocator);
- enum_obj.tag_ty_inferred = false;
- }
- return decl_val;
- }
extra_index += body.len;
const bit_bags_count = std.math.divCeil(usize, fields_len, 32) catch unreachable;
@@ -2536,6 +2554,9 @@ fn zirEnumDecl(
}
enum_obj.tag_ty = try ty.copy(decl_arena_allocator);
enum_obj.tag_ty_inferred = false;
+ } else if (fields_len == 0) {
+ enum_obj.tag_ty = try Type.Tag.int_unsigned.create(decl_arena_allocator, 0);
+ enum_obj.tag_ty_inferred = true;
} else {
const bits = std.math.log2_int_ceil(usize, fields_len);
enum_obj.tag_ty = try Type.Tag.int_unsigned.create(decl_arena_allocator, bits);
@@ -2673,7 +2694,7 @@ fn zirUnionDecl(
const union_ty = Type.initPayload(&union_payload.base);
const union_val = try Value.Tag.ty.create(new_decl_arena_allocator, union_ty);
const mod = sema.mod;
- const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, .{
+ const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = union_val,
}, small.name_strategy, "union", inst);
@@ -2684,7 +2705,6 @@ fn zirUnionDecl(
.owner_decl = new_decl_index,
.tag_ty = Type.initTag(.@"null"),
.fields = .{},
- .node_offset = src.node_offset.x,
.zir_index = inst,
.layout = small.layout,
.status = .none,
@@ -2742,7 +2762,7 @@ fn zirOpaqueDecl(
};
const opaque_ty = Type.initPayload(&opaque_ty_payload.base);
const opaque_val = try Value.Tag.ty.create(new_decl_arena_allocator, opaque_ty);
- const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, .{
+ const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = opaque_val,
}, small.name_strategy, "opaque", inst);
@@ -2752,7 +2772,6 @@ fn zirOpaqueDecl(
opaque_obj.* = .{
.owner_decl = new_decl_index,
- .node_offset = src.node_offset.x,
.namespace = .{
.parent = block.namespace,
.ty = opaque_ty,
@@ -2791,7 +2810,7 @@ fn zirErrorSetDecl(
const error_set_ty = try Type.Tag.error_set.create(new_decl_arena_allocator, error_set);
const error_set_val = try Value.Tag.ty.create(new_decl_arena_allocator, error_set_ty);
const mod = sema.mod;
- const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, .{
+ const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = error_set_val,
}, name_strategy, "error", inst);
@@ -2816,7 +2835,6 @@ fn zirErrorSetDecl(
error_set.* = .{
.owner_decl = new_decl_index,
- .node_offset = inst_data.src_node,
.names = names,
};
try new_decl.finalizeNewArena(&new_decl_arena);
@@ -3068,7 +3086,7 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
const candidate = block.instructions.items[search_index];
switch (air_tags[candidate]) {
- .dbg_stmt => continue,
+ .dbg_stmt, .dbg_block_begin, .dbg_block_end => continue,
.store => break candidate,
else => break :ct,
}
@@ -3080,7 +3098,7 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
const candidate = block.instructions.items[search_index];
switch (air_tags[candidate]) {
- .dbg_stmt => continue,
+ .dbg_stmt, .dbg_block_begin, .dbg_block_end => continue,
.alloc => {
if (Air.indexToRef(candidate) != alloc) break :ct;
break;
@@ -3298,7 +3316,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
const candidate = block.instructions.items[search_index];
switch (air_tags[candidate]) {
- .dbg_stmt => continue,
+ .dbg_stmt, .dbg_block_begin, .dbg_block_end => continue,
.store => break candidate,
else => break :ct,
}
@@ -3310,7 +3328,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
const candidate = block.instructions.items[search_index];
switch (air_tags[candidate]) {
- .dbg_stmt => continue,
+ .dbg_stmt, .dbg_block_begin, .dbg_block_end => continue,
.bitcast => break candidate,
else => break :ct,
}
@@ -3322,7 +3340,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
const candidate = block.instructions.items[search_index];
switch (air_tags[candidate]) {
- .dbg_stmt => continue,
+ .dbg_stmt, .dbg_block_begin, .dbg_block_end => continue,
.constant => break candidate,
else => break :ct,
}
@@ -3596,8 +3614,6 @@ fn validateUnionInit(
union_ptr: Air.Inst.Ref,
is_comptime: bool,
) CompileError!void {
- const union_obj = union_ty.cast(Type.Payload.Union).?.data;
-
if (instrs.len != 1) {
const msg = msg: {
const msg = try sema.errMsg(
@@ -3631,7 +3647,8 @@ fn validateUnionInit(
const field_src: LazySrcLoc = .{ .node_offset_initializer = field_ptr_data.src_node };
const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data;
const field_name = sema.code.nullTerminatedString(field_ptr_extra.field_name_start);
- const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_src);
+ // Validate the field access but ignore the index since we want the tag enum field index.
+ _ = try sema.unionFieldIndex(block, union_ty, field_name, field_src);
const air_tags = sema.air_instructions.items(.tag);
const air_datas = sema.air_instructions.items(.data);
const field_ptr_air_ref = sema.inst_map.get(field_ptr).?;
@@ -3690,7 +3707,9 @@ fn validateUnionInit(
break;
}
- const tag_val = try Value.Tag.enum_field_index.create(sema.arena, field_index);
+ const tag_ty = union_ty.unionTagTypeHypothetical();
+ const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?);
+ const tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index);
if (init_val) |val| {
// Our task is to delete all the `field_ptr` and `store` instructions, and insert
@@ -3707,7 +3726,7 @@ fn validateUnionInit(
}
try sema.requireFunctionBlock(block, init_src);
- const new_tag = try sema.addConstant(union_obj.tag_ty, tag_val);
+ const new_tag = try sema.addConstant(tag_ty, tag_val);
_ = try block.addBinOp(.set_union_tag, union_ptr, new_tag);
}
@@ -3754,11 +3773,13 @@ fn validateStructInit(
}
var root_msg: ?*Module.ErrorMsg = null;
+ errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
const struct_ptr = try sema.resolveInst(struct_ptr_zir_ref);
if ((is_comptime or block.is_comptime) and
(try sema.resolveDefinedValue(block, init_src, struct_ptr)) != null)
{
+ try sema.resolveStructLayout(block, init_src, struct_ty);
// In this case the only thing we need to do is evaluate the implicit
// store instructions for default field values, and report any missing fields.
// Avoid the cost of the extra machinery for detecting a comptime struct init value.
@@ -3929,6 +3950,7 @@ fn validateStructInit(
}
if (root_msg) |msg| {
+ root_msg = null;
if (struct_ty.castTag(.@"struct")) |struct_obj| {
const fqn = try struct_obj.data.getFullyQualifiedName(sema.mod);
defer gpa.free(fqn);
@@ -3952,6 +3974,7 @@ fn validateStructInit(
try sema.storePtr2(block, init_src, struct_ptr, init_src, struct_init, init_src, .store);
return;
}
+ try sema.resolveStructLayout(block, init_src, struct_ty);
// Our task is to insert `store` instructions for all the default field values.
for (found_fields) |field_ptr, i| {
@@ -3987,6 +4010,8 @@ fn zirValidateArrayInit(
if (instrs.len != array_len and array_ty.isTuple()) {
const struct_obj = array_ty.castTag(.tuple).?.data;
var root_msg: ?*Module.ErrorMsg = null;
+ errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
+
for (struct_obj.values) |default_val, i| {
if (i < instrs.len) continue;
@@ -4001,6 +4026,7 @@ fn zirValidateArrayInit(
}
if (root_msg) |msg| {
+ root_msg = null;
return sema.failWithOwnedErrorMsg(msg);
}
}
@@ -4038,6 +4064,19 @@ fn zirValidateArrayInit(
// Determine whether the value stored to this pointer is comptime-known.
+ if (array_ty.isTuple()) {
+ if (array_ty.structFieldValueComptime(i)) |opv| {
+ element_vals[i] = opv;
+ continue;
+ }
+ } else {
+ // Array has one possible value, so value is always comptime-known
+ if (opt_opv) |opv| {
+ element_vals[i] = opv;
+ continue;
+ }
+ }
+
const elem_ptr_air_ref = sema.inst_map.get(elem_ptr).?;
const elem_ptr_air_inst = Air.refToIndex(elem_ptr_air_ref).?;
// Find the block index of the elem_ptr so that we can look at the next
@@ -4054,19 +4093,6 @@ fn zirValidateArrayInit(
}
first_block_index = @minimum(first_block_index, block_index);
- if (array_ty.isTuple()) {
- if (array_ty.structFieldValueComptime(i)) |opv| {
- element_vals[i] = opv;
- continue;
- }
- } else {
- // Array has one possible value, so value is always comptime-known
- if (opt_opv) |opv| {
- element_vals[i] = opv;
- continue;
- }
- }
-
// If the next instruction is a store with a comptime operand, this element
// is comptime.
const next_air_inst = block.instructions.items[block_index + 1];
@@ -4433,43 +4459,6 @@ fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!v
return sema.storePtr2(block, src, ptr, src, operand, src, if (is_ret) .ret_ptr else .store);
}
-fn zirParamType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const callee_src = sema.src;
-
- const inst_data = sema.code.instructions.items(.data)[inst].param_type;
- const callee = try sema.resolveInst(inst_data.callee);
- const callee_ty = sema.typeOf(callee);
- var param_index = inst_data.param_index;
-
- const fn_ty = if (callee_ty.tag() == .bound_fn) fn_ty: {
- const bound_fn_val = try sema.resolveConstValue(block, .unneeded, callee, undefined);
- const bound_fn = bound_fn_val.castTag(.bound_fn).?.data;
- const fn_ty = sema.typeOf(bound_fn.func_inst);
- param_index += 1;
- break :fn_ty fn_ty;
- } else callee_ty;
-
- const fn_info = if (fn_ty.zigTypeTag() == .Pointer)
- fn_ty.childType().fnInfo()
- else
- fn_ty.fnInfo();
-
- if (param_index >= fn_info.param_types.len) {
- if (fn_info.is_var_args) {
- return sema.addType(Type.initTag(.var_args_param));
- }
- // TODO implement begin_call/end_call Zir instructions and check
- // argument count before casting arguments to parameter types.
- return sema.fail(block, callee_src, "wrong number of arguments", .{});
- }
-
- if (fn_info.param_types[param_index].tag() == .generic_poison) {
- return sema.addType(Type.initTag(.var_args_param));
- }
-
- return sema.addType(fn_info.param_types[param_index]);
-}
-
fn zirStr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -4775,7 +4764,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr
fn zirSuspendBlock(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
- return sema.fail(parent_block, src, "TODO: implement Sema.zirSuspendBlock", .{});
+ return sema.failWithUseOfAsync(parent_block, src);
}
fn zirBlock(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -5403,6 +5392,17 @@ fn lookupInNamespace(
}
}
+ {
+ var i: usize = 0;
+ while (i < candidates.items.len) {
+ if (candidates.items[i] == sema.owner_decl_index) {
+ _ = candidates.orderedRemove(i);
+ } else {
+ i += 1;
+ }
+ }
+ }
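+    // Standalone model of the remove-while-iterating idiom above: orderedRemove
+    // shifts later items down, so the index only advances when nothing was
+    // removed (sketch; shouldRemove is a placeholder):
+    // var i: usize = 0;
+    // while (i < list.items.len) {
+    //     if (shouldRemove(list.items[i])) {
+    //         _ = list.orderedRemove(i);
+    //     } else {
+    //         i += 1;
+    //     }
+    // }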
+
switch (candidates.items.len) {
0 => {},
1 => {
@@ -5439,6 +5439,19 @@ fn lookupInNamespace(
return null;
}
+fn funcDeclSrc(sema: *Sema, block: *Block, src: LazySrcLoc, func_inst: Air.Inst.Ref) !?Module.SrcLoc {
+ const func_val = (try sema.resolveMaybeUndefVal(block, src, func_inst)) orelse return null;
+ if (func_val.isUndef()) return null;
+ const owner_decl_index = switch (func_val.tag()) {
+ .extern_fn => func_val.castTag(.extern_fn).?.data.owner_decl,
+ .function => func_val.castTag(.function).?.data.owner_decl,
+ .decl_ref => sema.mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data.owner_decl,
+ else => return null,
+ };
+ const owner_decl = sema.mod.declPtr(owner_decl_index);
+ return owner_decl.srcLoc();
+}
+
fn zirCall(
sema: *Sema,
block: *Block,
@@ -5451,13 +5464,14 @@ fn zirCall(
const func_src: LazySrcLoc = .{ .node_offset_call_func = inst_data.src_node };
const call_src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.Call, inst_data.payload_index);
- const args = sema.code.refSlice(extra.end, extra.data.flags.args_len);
+ const args_len = extra.data.flags.args_len;
const modifier = @intToEnum(std.builtin.CallOptions.Modifier, extra.data.flags.packed_modifier);
const ensure_result_used = extra.data.flags.ensure_result_used;
var func = try sema.resolveInst(extra.data.callee);
var resolved_args: []Air.Inst.Ref = undefined;
+ var arg_index: u32 = 0;
const func_type = sema.typeOf(func);
@@ -5468,16 +5482,93 @@ fn zirCall(
const bound_func = try sema.resolveValue(block, .unneeded, func, undefined);
const bound_data = &bound_func.cast(Value.Payload.BoundFn).?.data;
func = bound_data.func_inst;
- resolved_args = try sema.arena.alloc(Air.Inst.Ref, args.len + 1);
- resolved_args[0] = bound_data.arg0_inst;
- for (args) |zir_arg, i| {
- resolved_args[i + 1] = try sema.resolveInst(zir_arg);
- }
+ resolved_args = try sema.arena.alloc(Air.Inst.Ref, args_len + 1);
+ resolved_args[arg_index] = bound_data.arg0_inst;
+ arg_index += 1;
} else {
- resolved_args = try sema.arena.alloc(Air.Inst.Ref, args.len);
- for (args) |zir_arg, i| {
- resolved_args[i] = try sema.resolveInst(zir_arg);
+ resolved_args = try sema.arena.alloc(Air.Inst.Ref, args_len);
+ }
+ const total_args = args_len + @boolToInt(bound_arg_src != null);
+
+ const callee_ty = sema.typeOf(func);
+ const func_ty = func_ty: {
+ switch (callee_ty.zigTypeTag()) {
+ .Fn => break :func_ty callee_ty,
+ .Pointer => {
+ const ptr_info = callee_ty.ptrInfo().data;
+ if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag() == .Fn) {
+ break :func_ty ptr_info.pointee_type;
+ }
+ },
+ else => {},
}
+ return sema.fail(block, func_src, "type '{}' not a function", .{callee_ty.fmt(sema.mod)});
+ };
+ const func_ty_info = func_ty.fnInfo();
+
+ const fn_params_len = func_ty_info.param_types.len;
+ check_args: {
+ if (func_ty_info.is_var_args) {
+ assert(func_ty_info.cc == .C);
+ if (total_args >= fn_params_len) break :check_args;
+ } else if (fn_params_len == total_args) {
+ break :check_args;
+ }
+
+ const decl_src = try sema.funcDeclSrc(block, func_src, func);
+ const member_str = if (bound_arg_src != null) "member function " else "";
+ const variadic_str = if (func_ty_info.is_var_args) "at least " else "";
+ const msg = msg: {
+ const msg = try sema.errMsg(
+ block,
+ func_src,
+ "{s}expected {s}{d} argument(s), found {d}",
+ .{
+ member_str,
+ variadic_str,
+ fn_params_len - @boolToInt(bound_arg_src != null),
+ args_len,
+ },
+ );
+ errdefer msg.destroy(sema.gpa);
+
+ if (decl_src) |some| try sema.mod.errNoteNonLazy(some, msg, "function declared here", .{});
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
+ }
+
+ const args_body = sema.code.extra[extra.end..];
+
+ const parent_comptime = block.is_comptime;
+ // `extra_index` and `arg_index` are separate since the bound function is passed as the first argument.
+ var extra_index: usize = 0;
+ var arg_start: u32 = args_len;
+ while (extra_index < args_len) : ({
+ extra_index += 1;
+ arg_index += 1;
+ }) {
+ const arg_end = sema.code.extra[extra.end + extra_index];
+ defer arg_start = arg_end;
+
+ const param_ty = if (arg_index >= fn_params_len or
+ func_ty_info.param_types[arg_index].tag() == .generic_poison)
+ Type.initTag(.var_args_param)
+ else
+ func_ty_info.param_types[arg_index];
+
+ const old_comptime = block.is_comptime;
+ defer block.is_comptime = old_comptime;
+ // Generate args to comptime params in comptime block.
+ block.is_comptime = parent_comptime;
+ if (arg_index < fn_params_len and func_ty_info.comptime_params[arg_index]) {
+ block.is_comptime = true;
+ }
+
+ const param_ty_inst = try sema.addType(param_ty);
+ try sema.inst_map.put(sema.gpa, inst, param_ty_inst);
+
+ resolved_args[arg_index] = try sema.resolveBody(block, args_body[arg_start..arg_end], inst);
}
return sema.analyzeCall(block, func, func_src, call_src, modifier, ensure_result_used, resolved_args, bound_arg_src);
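// Standalone model (assumed layout, mirroring the loop above) of the
// end-offset encoding: extra holds one end offset per argument, argument i's
// body spans [previous end .. extra[i]), and the first body starts right
// after the args_len offsets themselves.
test "per-argument end offsets" {
    const testing = @import("std").testing;
    const args_len: u32 = 2;
    // two end offsets (4 and 7), then the two bodies packed back to back
    const extra = [_]u32{ 4, 7, 101, 102, 201, 202, 203 };
    var arg_start: u32 = args_len;
    var lens: [args_len]u32 = undefined;
    var i: usize = 0;
    while (i < args_len) : (i += 1) {
        const arg_end = extra[i];
        lens[i] = arg_end - arg_start;
        arg_start = arg_end;
    }
    try testing.expectEqual(@as(u32, 2), lens[0]);
    try testing.expectEqual(@as(u32, 3), lens[1]);
}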
@@ -5487,11 +5578,15 @@ const GenericCallAdapter = struct {
generic_fn: *Module.Fn,
precomputed_hash: u64,
func_ty_info: Type.Payload.Function.Data,
- /// Unlike comptime_args, the Type here is not always present.
- /// .generic_poison is used to communicate non-anytype parameters.
- comptime_tvs: []const TypedValue,
+ args: []const Arg,
module: *Module,
+ const Arg = struct {
+ ty: Type,
+ val: Value,
+ is_anytype: bool,
+ };
+
pub fn eql(ctx: @This(), adapted_key: void, other_key: *Module.Fn) bool {
_ = adapted_key;
// The generic function Decl is guaranteed to be the first dependency
@@ -5502,11 +5597,11 @@ const GenericCallAdapter = struct {
const other_comptime_args = other_key.comptime_args.?;
for (other_comptime_args[0..ctx.func_ty_info.param_types.len]) |other_arg, i| {
- const this_arg = ctx.comptime_tvs[i];
+ const this_arg = ctx.args[i];
const this_is_comptime = this_arg.val.tag() != .generic_poison;
const other_is_comptime = other_arg.val.tag() != .generic_poison;
- const this_is_anytype = this_arg.ty.tag() != .generic_poison;
- const other_is_anytype = other_key.anytype_args[i];
+ const this_is_anytype = this_arg.is_anytype;
+ const other_is_anytype = other_key.isAnytypeParam(ctx.module, @intCast(u32, i));
if (other_is_anytype != this_is_anytype) return false;
if (other_is_comptime != this_is_comptime) return false;
@@ -5524,7 +5619,17 @@ const GenericCallAdapter = struct {
}
} else if (this_is_comptime) {
// Both are comptime parameters but not anytype parameters.
- if (!this_arg.val.eql(other_arg.val, other_arg.ty, ctx.module)) {
+ // We assert no error is possible here because any lazy values must be resolved
+ // before inserting into the generic function hash map.
+ const is_eql = Value.eqlAdvanced(
+ this_arg.val,
+ this_arg.ty,
+ other_arg.val,
+ other_arg.ty,
+ ctx.module,
+ null,
+ ) catch unreachable;
+ if (!is_eql) {
return false;
}
}
@@ -5540,6 +5645,37 @@ const GenericCallAdapter = struct {
}
};
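// The struct above plugs into std's *Adapted hash-map API. A standalone model
// (all names below are hypothetical): keys are precomputed u64 hashes, the
// adapter probes with a pseudo-key, and the caller fills in key_ptr on a
// miss, mirroring how monomorphed_funcs is probed before the instantiated Fn
// exists.
const IdentityContext = struct {
    pub fn hash(_: @This(), k: u64) u64 {
        return k;
    }
    pub fn eql(_: @This(), a: u64, b: u64) bool {
        return a == b;
    }
};

const PrecomputedAdapter = struct {
    precomputed_hash: u64,

    pub fn hash(ctx: @This(), _: void) u64 {
        return ctx.precomputed_hash;
    }
    pub fn eql(ctx: @This(), _: void, stored: u64) bool {
        return ctx.precomputed_hash == stored;
    }
};

test "adapted getOrPut with a precomputed hash" {
    const std_ = @import("std");
    var map: std_.HashMapUnmanaged(u64, void, IdentityContext, std_.hash_map.default_max_load_percentage) = .{};
    defer map.deinit(std_.testing.allocator);

    const gop = try map.getOrPutAdapted(std_.testing.allocator, {}, PrecomputedAdapter{ .precomputed_hash = 0xdead });
    try std_.testing.expect(!gop.found_existing);
    gop.key_ptr.* = 0xdead; // the caller materializes the real key on a miss
}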
+fn addComptimeReturnTypeNote(
+ sema: *Sema,
+ block: *Block,
+ func: Air.Inst.Ref,
+ func_src: LazySrcLoc,
+ return_ty: Type,
+ parent: *Module.ErrorMsg,
+ requires_comptime: bool,
+) !void {
+ if (!requires_comptime) return;
+
+ const src_loc = if (try sema.funcDeclSrc(block, func_src, func)) |capture| blk: {
+ var src_loc = capture;
+ src_loc.lazy = .{ .node_offset_fn_type_ret_ty = 0 };
+ break :blk src_loc;
+ } else blk: {
+ const src_decl = sema.mod.declPtr(block.src_decl);
+ break :blk func_src.toSrcLoc(src_decl);
+ };
+ if (return_ty.tag() == .generic_poison) {
+ return sema.mod.errNoteNonLazy(src_loc, parent, "generic function is instantiated with a comptime only return type", .{});
+ }
+ try sema.mod.errNoteNonLazy(
+ src_loc,
+ parent,
+ "function is being called at comptime because it returns a comptime only type '{}'",
+ .{return_ty.fmt(sema.mod)},
+ );
+ try sema.explainWhyTypeIsComptime(block, func_src, parent, src_loc, return_ty);
+}
+
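+// User-level sketch of when these notes fire: a generic instantiation whose
+// return type turns out comptime-only forces the whole call to comptime, so a
+// runtime argument becomes a compile error carrying the note above, e.g.:
+// fn f(x: anytype) comptime_int { return 2 * x; }
+// var runtime: u32 = 1;
+// _ = f(runtime); // argument must be comptime known, with the note:
+// // note: function is being called at comptime because it returns a comptime only type 'comptime_int'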
fn analyzeCall(
sema: *Sema,
block: *Block,
@@ -5571,13 +5707,20 @@ fn analyzeCall(
const func_ty_info = func_ty.fnInfo();
const cc = func_ty_info.cc;
if (cc == .Naked) {
- // TODO add error note: declared here
- return sema.fail(
- block,
- func_src,
- "unable to call function with naked calling convention",
- .{},
- );
+ const decl_src = try sema.funcDeclSrc(block, func_src, func);
+ const msg = msg: {
+ const msg = try sema.errMsg(
+ block,
+ func_src,
+ "unable to call function with naked calling convention",
+ .{},
+ );
+ errdefer msg.destroy(sema.gpa);
+
+ if (decl_src) |some| try sema.mod.errNoteNonLazy(some, msg, "function declared here", .{});
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
}
const fn_params_len = func_ty_info.param_types.len;
if (func_ty_info.is_var_args) {
@@ -5612,7 +5755,7 @@ fn analyzeCall(
.never_inline => Air.Inst.Tag.call_never_inline,
.always_tail => Air.Inst.Tag.call_always_tail,
- .async_kw => return sema.fail(block, call_src, "TODO implement async call", .{}),
+ .async_kw => return sema.failWithUseOfAsync(block, call_src),
};
if (modifier == .never_inline and func_ty_info.cc == .Inline) {
@@ -5623,9 +5766,11 @@ fn analyzeCall(
var is_generic_call = func_ty_info.is_generic;
var is_comptime_call = block.is_comptime or modifier == .compile_time;
+ var comptime_only_ret_ty = false;
if (!is_comptime_call) {
if (sema.typeRequiresComptime(block, func_src, func_ty_info.return_type)) |ct| {
is_comptime_call = ct;
+ comptime_only_ret_ty = ct;
} else |err| switch (err) {
error.GenericPoison => is_generic_call = true,
else => |e| return e,
@@ -5654,6 +5799,7 @@ fn analyzeCall(
error.ComptimeReturn => {
is_inline_call = true;
is_comptime_call = true;
+ comptime_only_ret_ty = true;
},
else => |e| return e,
}
@@ -5664,8 +5810,12 @@ fn analyzeCall(
}
const result: Air.Inst.Ref = if (is_inline_call) res: {
- // TODO explain why function is being called at comptime
- const func_val = try sema.resolveConstValue(block, func_src, func, "function being called at comptime must be comptime known");
+ const func_val = sema.resolveConstValue(block, func_src, func, "function being called at comptime must be comptime known") catch |err| {
+ if (err == error.AnalysisFail and sema.err != null) {
+ try sema.addComptimeReturnTypeNote(block, func, func_src, func_ty_info.return_type, sema.err.?, comptime_only_ret_ty);
+ }
+ return err;
+ };
const module_fn = switch (func_val.tag()) {
.decl_ref => mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data,
.function => func_val.castTag(.function).?.data,
@@ -5777,12 +5927,16 @@ fn analyzeCall(
is_comptime_call,
&should_memoize,
memoized_call_key,
+ // last 4 arguments are only used when reporting errors
+ undefined,
+ undefined,
+ undefined,
+ undefined,
) catch |err| switch (err) {
error.NeededSourceLocation => {
- sema.inst_map.clearRetainingCapacity();
+ _ = sema.inst_map.remove(inst);
const decl = sema.mod.declPtr(block.src_decl);
child_block.src_decl = block.src_decl;
- arg_i = 0;
try sema.analyzeInlineCallArg(
block,
&child_block,
@@ -5794,6 +5948,10 @@ fn analyzeCall(
is_comptime_call,
&should_memoize,
memoized_call_key,
+ func,
+ func_src,
+ func_ty_info.return_type,
+ comptime_only_ret_ty,
);
return error.AnalysisFail;
},
@@ -5956,7 +6114,18 @@ fn analyzeCall(
else => |e| return e,
};
} else {
- args[i] = uncasted_arg;
+ args[i] = sema.coerceVarArgParam(block, uncasted_arg, .unneeded) catch |err| switch (err) {
+ error.NeededSourceLocation => {
+ const decl = sema.mod.declPtr(block.src_decl);
+ _ = try sema.coerceVarArgParam(
+ block,
+ uncasted_arg,
+ Module.argSrc(call_src.node_offset.x, sema.gpa, decl, i, bound_arg_src),
+ );
+ return error.AnalysisFail;
+ },
+ else => |e| return e,
+ };
}
}
@@ -5998,6 +6167,10 @@ fn analyzeInlineCallArg(
is_comptime_call: bool,
should_memoize: *bool,
memoized_call_key: Module.MemoizedCall.Key,
+ func: Air.Inst.Ref,
+ func_src: LazySrcLoc,
+ ret_ty: Type,
+ comptime_only_ret_ty: bool,
) !void {
const zir_tags = sema.code.instructions.items(.tag);
switch (zir_tags[inst]) {
@@ -6013,14 +6186,23 @@ fn analyzeInlineCallArg(
new_fn_info.param_types[arg_i.*] = param_ty;
const uncasted_arg = uncasted_args[arg_i.*];
if (try sema.typeRequiresComptime(arg_block, arg_src, param_ty)) {
- _ = try sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "argument to parameter with comptime only type must be comptime known");
+ _ = sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "argument to parameter with comptime only type must be comptime known") catch |err| {
+ if (err == error.AnalysisFail and sema.err != null) {
+ try sema.addComptimeReturnTypeNote(arg_block, func, func_src, ret_ty, sema.err.?, comptime_only_ret_ty);
+ }
+ return err;
+ };
}
const casted_arg = try sema.coerce(arg_block, param_ty, uncasted_arg, arg_src);
try sema.inst_map.putNoClobber(sema.gpa, inst, casted_arg);
if (is_comptime_call) {
- // TODO explain why function is being called at comptime
- const arg_val = try sema.resolveConstMaybeUndefVal(arg_block, arg_src, casted_arg, "argument to function being called at comptime must be comptime known");
+ const arg_val = sema.resolveConstMaybeUndefVal(arg_block, arg_src, casted_arg, "argument to function being called at comptime must be comptime known") catch |err| {
+ if (err == error.AnalysisFail and sema.err != null) {
+ try sema.addComptimeReturnTypeNote(arg_block, func, func_src, ret_ty, sema.err.?, comptime_only_ret_ty);
+ }
+ return err;
+ };
switch (arg_val.tag()) {
.generic_poison, .generic_poison_type => {
// This function is currently evaluated as part of an as-of-yet unresolvable
@@ -6050,8 +6232,12 @@ fn analyzeInlineCallArg(
try sema.inst_map.putNoClobber(sema.gpa, inst, uncasted_arg);
if (is_comptime_call) {
- // TODO explain why function is being called at comptime
- const arg_val = try sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "argument to function being called at comptime must be comptime known");
+ const arg_val = sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "argument to function being called at comptime must be comptime known") catch |err| {
+ if (err == error.AnalysisFail and sema.err != null) {
+ try sema.addComptimeReturnTypeNote(arg_block, func, func_src, ret_ty, sema.err.?, comptime_only_ret_ty);
+ }
+ return err;
+ };
switch (arg_val.tag()) {
.generic_poison, .generic_poison_type => {
// This function is currently evaluated as part of an as-of-yet unresolvable
@@ -6157,8 +6343,7 @@ fn instantiateGenericCall(
var hasher = std.hash.Wyhash.init(0);
std.hash.autoHash(&hasher, @ptrToInt(module_fn));
- const comptime_tvs = try sema.arena.alloc(TypedValue, func_ty_info.param_types.len);
-
+ const generic_args = try sema.arena.alloc(GenericCallAdapter.Arg, func_ty_info.param_types.len);
{
var i: usize = 0;
for (fn_info.param_body) |inst| {
@@ -6182,8 +6367,9 @@ fn instantiateGenericCall(
else => continue,
}
+ const arg_ty = sema.typeOf(uncasted_args[i]);
+
if (is_comptime) {
- const arg_ty = sema.typeOf(uncasted_args[i]);
const arg_val = sema.analyzeGenericCallArgVal(block, .unneeded, uncasted_args[i]) catch |err| switch (err) {
error.NeededSourceLocation => {
const decl = sema.mod.declPtr(block.src_decl);
@@ -6196,27 +6382,30 @@ fn instantiateGenericCall(
arg_val.hash(arg_ty, &hasher, mod);
if (is_anytype) {
arg_ty.hashWithHasher(&hasher, mod);
- comptime_tvs[i] = .{
+ generic_args[i] = .{
.ty = arg_ty,
.val = arg_val,
+ .is_anytype = true,
};
} else {
- comptime_tvs[i] = .{
- .ty = Type.initTag(.generic_poison),
+ generic_args[i] = .{
+ .ty = arg_ty,
.val = arg_val,
+ .is_anytype = false,
};
}
} else if (is_anytype) {
- const arg_ty = sema.typeOf(uncasted_args[i]);
arg_ty.hashWithHasher(&hasher, mod);
- comptime_tvs[i] = .{
+ generic_args[i] = .{
.ty = arg_ty,
.val = Value.initTag(.generic_poison),
+ .is_anytype = true,
};
} else {
- comptime_tvs[i] = .{
- .ty = Type.initTag(.generic_poison),
+ generic_args[i] = .{
+ .ty = arg_ty,
.val = Value.initTag(.generic_poison),
+ .is_anytype = false,
};
}
@@ -6230,7 +6419,7 @@ fn instantiateGenericCall(
.generic_fn = module_fn,
.precomputed_hash = precomputed_hash,
.func_ty_info = func_ty_info,
- .comptime_tvs = comptime_tvs,
+ .args = generic_args,
.module = mod,
};
const gop = try mod.monomorphed_funcs.getOrPutAdapted(gpa, {}, adapter);
@@ -6261,6 +6450,7 @@ fn instantiateGenericCall(
new_decl.is_exported = fn_owner_decl.is_exported;
new_decl.has_align = fn_owner_decl.has_align;
new_decl.has_linksection_or_addrspace = fn_owner_decl.has_linksection_or_addrspace;
+ new_decl.@"linksection" = fn_owner_decl.@"linksection";
new_decl.@"addrspace" = fn_owner_decl.@"addrspace";
new_decl.zir_decl_index = fn_owner_decl.zir_decl_index;
new_decl.alive = true; // This Decl is called at runtime.
@@ -6305,6 +6495,7 @@ fn instantiateGenericCall(
.comptime_args = try new_decl_arena_allocator.alloc(TypedValue, uncasted_args.len),
.comptime_args_fn_inst = module_fn.zir_body_inst,
.preallocated_new_func = new_module_func,
+ .is_generic_instantiation = true,
};
defer child_sema.deinit();
@@ -6386,12 +6577,9 @@ fn instantiateGenericCall(
errdefer new_func.deinit(gpa);
assert(new_func == new_module_func);
- const anytype_args = try new_decl_arena_allocator.alloc(bool, func_ty_info.param_types.len);
- new_func.anytype_args = anytype_args.ptr;
arg_i = 0;
for (fn_info.param_body) |inst| {
var is_comptime = false;
- var is_anytype = false;
switch (zir_tags[inst]) {
.param => {
is_comptime = func_ty_info.paramIsComptime(arg_i);
@@ -6400,11 +6588,9 @@ fn instantiateGenericCall(
is_comptime = true;
},
.param_anytype => {
- is_anytype = true;
is_comptime = func_ty_info.paramIsComptime(arg_i);
},
.param_anytype_comptime => {
- is_anytype = true;
is_comptime = true;
},
else => continue,
@@ -6412,10 +6598,9 @@ fn instantiateGenericCall(
// We populate the Type here regardless because it is needed by
// `GenericCallAdapter.eql` as well as function body analysis.
- // Whether it is anytype is communicated by `anytype_args`.
+ // Whether it is anytype is communicated by `isAnytypeParam`.
const arg = child_sema.inst_map.get(inst).?;
const copied_arg_ty = try child_sema.typeOf(arg).copy(new_decl_arena_allocator);
- anytype_args[arg_i] = is_anytype;
if (try sema.typeRequiresComptime(block, .unneeded, copied_arg_ty)) {
is_comptime = true;
@@ -6588,8 +6773,13 @@ fn zirOptionalType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
- const src = inst_data.src();
- const child_type = try sema.resolveType(block, src, inst_data.operand);
+ const operand_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node };
+ const child_type = try sema.resolveType(block, operand_src, inst_data.operand);
+ if (child_type.zigTypeTag() == .Opaque) {
+ return sema.fail(block, operand_src, "opaque type '{}' cannot be optional", .{child_type.fmt(sema.mod)});
+ } else if (child_type.zigTypeTag() == .Null) {
+ return sema.fail(block, operand_src, "type '{}' cannot be optional", .{child_type.fmt(sema.mod)});
+ }
const opt_type = try Type.optional(sema.arena, child_type);
return sema.addType(opt_type);
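// User-level illustration of the two rejections above (both expected to fail):
// const O = opaque {};
// comptime {
//     _ = ?O;             // error: opaque type 'O' cannot be optional
//     _ = ?@TypeOf(null); // error: type '@TypeOf(null)' cannot be optional
// }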
@@ -6662,6 +6852,9 @@ fn zirAnyframeType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
+ if (true) {
+ return sema.failWithUseOfAsync(block, inst_data.src());
+ }
const operand_src: LazySrcLoc = .{ .node_offset_anyframe_type = inst_data.src_node };
const return_type = try sema.resolveType(block, operand_src, inst_data.operand);
const anyframe_type = try Type.Tag.anyframe_T.create(sema.arena, return_type);
@@ -6685,6 +6878,15 @@ fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
error_set.fmt(sema.mod),
});
}
+ if (payload.zigTypeTag() == .Opaque) {
+ return sema.fail(block, rhs_src, "error union with payload of opaque type '{}' not allowed", .{
+ payload.fmt(sema.mod),
+ });
+ } else if (payload.zigTypeTag() == .ErrorSet) {
+ return sema.fail(block, rhs_src, "error union with payload of error set type '{}' not allowed", .{
+ payload.fmt(sema.mod),
+ });
+ }
const err_union_ty = try Type.errorUnion(sema.arena, error_set, payload, sema.mod);
return sema.addType(err_union_ty);
}
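// Matching illustration for the error-union payload checks above (both fail):
// const O = opaque {};
// comptime {
//     _ = anyerror!O;        // error: error union with payload of opaque type 'O' not allowed
//     _ = anyerror!anyerror; // error: error union with payload of error set type 'anyerror' not allowed
// }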
@@ -6716,11 +6918,10 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const uncasted_operand = try sema.resolveInst(extra.operand);
const operand = try sema.coerce(block, Type.anyerror, uncasted_operand, operand_src);
- const result_ty = Type.u16;
if (try sema.resolveMaybeUndefVal(block, src, operand)) |val| {
if (val.isUndef()) {
- return sema.addConstUndef(result_ty);
+ return sema.addConstUndef(Type.err_int);
}
switch (val.tag()) {
.@"error" => {
@@ -6729,14 +6930,14 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
.base = .{ .tag = .int_u64 },
.data = (try sema.mod.getErrorValue(val.castTag(.@"error").?.data.name)).value,
};
- return sema.addConstant(result_ty, Value.initPayload(&payload.base));
+ return sema.addConstant(Type.err_int, Value.initPayload(&payload.base));
},
// This is not a valid combination with the type `anyerror`.
.the_only_possible_value => unreachable,
// Assume it's already encoded as an integer.
- else => return sema.addConstant(result_ty, val),
+ else => return sema.addConstant(Type.err_int, val),
}
}
@@ -6745,14 +6946,14 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
if (!op_ty.isAnyError()) {
const names = op_ty.errorSetNames();
switch (names.len) {
- 0 => return sema.addConstant(result_ty, Value.zero),
- 1 => return sema.addIntUnsigned(result_ty, sema.mod.global_error_set.get(names[0]).?),
+ 0 => return sema.addConstant(Type.err_int, Value.zero),
+ 1 => return sema.addIntUnsigned(Type.err_int, sema.mod.global_error_set.get(names[0]).?),
else => {},
}
}
try sema.requireRuntimeBlock(block, src, operand_src);
- return block.addBitCast(result_ty, operand);
+ return block.addBitCast(Type.err_int, operand);
}
fn zirIntToError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
@@ -6763,7 +6964,7 @@ fn zirIntToError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
const src = LazySrcLoc.nodeOffset(extra.node);
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const uncasted_operand = try sema.resolveInst(extra.operand);
- const operand = try sema.coerce(block, Type.u16, uncasted_operand, operand_src);
+ const operand = try sema.coerce(block, Type.err_int, uncasted_operand, operand_src);
const target = sema.mod.getTarget();
if (try sema.resolveDefinedValue(block, operand_src, operand)) |value| {
@@ -6780,7 +6981,10 @@ fn zirIntToError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
try sema.requireRuntimeBlock(block, src, operand_src);
if (block.wantSafety()) {
const is_lt_len = try block.addUnOp(.cmp_lt_errors_len, operand);
- try sema.addSafetyCheck(block, is_lt_len, .invalid_error_code);
+ const zero_val = try sema.addConstant(Type.err_int, Value.zero);
+ const is_non_zero = try block.addBinOp(.cmp_neq, operand, zero_val);
+ const ok = try block.addBinOp(.bit_and, is_lt_len, is_non_zero);
+ try sema.addSafetyCheck(block, ok, .invalid_error_code);
}
return block.addInst(.{
.tag = .bitcast,
@@ -6940,8 +7144,12 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
}
try sema.requireRuntimeBlock(block, src, operand_src);
- // TODO insert safety check to make sure the value matches an enum value
- return block.addTyOp(.intcast, dest_ty, operand);
+ const result = try block.addTyOp(.intcast, dest_ty, operand);
+ if (block.wantSafety() and !dest_ty.isNonexhaustiveEnum() and sema.mod.comp.bin_file.options.use_llvm) {
+ const ok = try block.addUnOp(.is_named_enum_value, result);
+ try sema.addSafetyCheck(block, ok, .invalid_enum_value);
+ }
+ return result;
}
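// Runtime-effect sketch of the new .invalid_enum_value safety check (LLVM
// backend, safety-checked builds): an out-of-range cast to an exhaustive enum
// now panics instead of yielding an invalid value.
// const E = enum(u8) { a = 1 };
// var raw: u8 = 2;
// _ = @intToEnum(E, raw); // safety panic instead of an invalid E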
/// Pointer in, pointer out.
@@ -7048,6 +7256,8 @@ fn zirOptionalPayload(
if (operand_ty.ptrSize() != .C) {
return sema.failWithExpectedOptionalType(block, src, operand_ty);
}
+ // TODO https://github.com/ziglang/zig/issues/6597
+ if (true) break :t operand_ty;
const ptr_info = operand_ty.ptrInfo().data;
break :t try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = try ptr_info.pointee_type.copy(sema.arena),
@@ -7425,10 +7635,11 @@ fn handleExternLibName(
) CompileError![:0]u8 {
blk: {
const mod = sema.mod;
+ const comp = mod.comp;
const target = mod.getTarget();
log.debug("extern fn symbol expected in lib '{s}'", .{lib_name});
if (target_util.is_libc_lib_name(target, lib_name)) {
- if (!mod.comp.bin_file.options.link_libc) {
+ if (!comp.bin_file.options.link_libc and !comp.bin_file.options.parent_compilation_link_libc) {
return sema.fail(
block,
src_loc,
@@ -7436,11 +7647,11 @@ fn handleExternLibName(
.{},
);
}
- mod.comp.bin_file.options.link_libc = true;
+ comp.bin_file.options.link_libc = true;
break :blk;
}
if (target_util.is_libcpp_lib_name(target, lib_name)) {
- if (!mod.comp.bin_file.options.link_libcpp) {
+ if (!comp.bin_file.options.link_libcpp) {
return sema.fail(
block,
src_loc,
@@ -7448,14 +7659,14 @@ fn handleExternLibName(
.{},
);
}
- mod.comp.bin_file.options.link_libcpp = true;
+ comp.bin_file.options.link_libcpp = true;
break :blk;
}
if (mem.eql(u8, lib_name, "unwind")) {
- mod.comp.bin_file.options.link_libunwind = true;
+ comp.bin_file.options.link_libunwind = true;
break :blk;
}
- if (!target.isWasm() and !mod.comp.bin_file.options.pic) {
+ if (!target.isWasm() and !comp.bin_file.options.pic) {
return sema.fail(
block,
src_loc,
@@ -7463,7 +7674,7 @@ fn handleExternLibName(
.{ lib_name, lib_name },
);
}
- mod.comp.stage1AddLinkLib(lib_name) catch |err| {
+ comp.stage1AddLinkLib(lib_name) catch |err| {
return sema.fail(block, src_loc, "unable to add link lib '{s}': {s}", .{
lib_name, @errorName(err),
});
@@ -7502,7 +7713,6 @@ fn funcCommon(
noalias_bits: u32,
is_noinline: bool,
) CompileError!Air.Inst.Ref {
- const fn_src = LazySrcLoc.nodeOffset(src_node_offset);
const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset };
const cc_src: LazySrcLoc = .{ .node_offset_fn_type_cc = src_node_offset };
@@ -7573,27 +7783,25 @@ fn funcCommon(
param_types[i] = param.ty;
sema.analyzeParameter(
block,
- fn_src,
.unneeded,
param,
comptime_params,
i,
&is_generic,
- is_extern,
cc_workaround,
+ has_body,
) catch |err| switch (err) {
error.NeededSourceLocation => {
const decl = sema.mod.declPtr(block.src_decl);
try sema.analyzeParameter(
block,
- fn_src,
Module.paramSrc(src_node_offset, sema.gpa, decl, i),
param,
comptime_params,
i,
&is_generic,
- is_extern,
cc_workaround,
+ has_body,
);
return error.AnalysisFail;
},
@@ -7601,18 +7809,17 @@ fn funcCommon(
};
}
- const ret_poison = if (!is_generic) rp: {
- if (sema.typeRequiresComptime(block, ret_ty_src, bare_return_type)) |ret_comptime| {
- is_generic = ret_comptime;
- break :rp bare_return_type.tag() == .generic_poison;
- } else |err| switch (err) {
- error.GenericPoison => {
- is_generic = true;
- break :rp true;
- },
- else => |e| return e,
- }
- } else bare_return_type.tag() == .generic_poison;
+ var ret_ty_requires_comptime = false;
+ const ret_poison = if (sema.typeRequiresComptime(block, ret_ty_src, bare_return_type)) |ret_comptime| rp: {
+ ret_ty_requires_comptime = ret_comptime;
+ break :rp bare_return_type.tag() == .generic_poison;
+ } else |err| switch (err) {
+ error.GenericPoison => rp: {
+ is_generic = true;
+ break :rp true;
+ },
+ else => |e| return e,
+ };
const return_type = if (!inferred_error_set or ret_poison)
bare_return_type
@@ -7657,6 +7864,41 @@ fn funcCommon(
return sema.failWithOwnedErrorMsg(msg);
}
+ // If the return type is comptime-only but not dependent on parameters, then all parameters also need to be comptime
+ if (!sema.is_generic_instantiation and has_body and ret_ty_requires_comptime) comptime_check: {
+ for (block.params.items) |param| {
+ if (!param.is_comptime) break;
+ } else break :comptime_check;
+
+ const msg = try sema.errMsg(
+ block,
+ ret_ty_src,
+ "function with comptime only return type '{}' requires all parameters to be comptime",
+ .{return_type.fmt(sema.mod)},
+ );
+ try sema.explainWhyTypeIsComptime(block, ret_ty_src, msg, ret_ty_src.toSrcLoc(sema.owner_decl), return_type);
+
+ const tags = sema.code.instructions.items(.tag);
+ const data = sema.code.instructions.items(.data);
+ const param_body = sema.code.getParamBody(func_inst);
+ for (block.params.items) |param, i| {
+ if (!param.is_comptime) {
+ const param_index = param_body[i];
+ const param_src = switch (tags[param_index]) {
+ .param => data[param_index].pl_tok.src(),
+ .param_anytype => data[param_index].str_tok.src(),
+ else => unreachable,
+ };
+ if (param.name.len != 0) {
+ try sema.errNote(block, param_src, msg, "param '{s}' is required to be comptime", .{param.name});
+ } else {
+ try sema.errNote(block, param_src, msg, "param is required to be comptime", .{});
+ }
+ }
+ }
+ return sema.failWithOwnedErrorMsg(msg);
+ }
+
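+// Sketch of code the new check rejects (wording from the messages above):
+// fn f(x: u32) comptime_int {
+//     return x;
+// }
+// // error: function with comptime only return type 'comptime_int' requires all parameters to be comptime
+// // note: param 'x' is required to be comptime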
const arch = sema.mod.getTarget().cpu.arch;
if (switch (cc_workaround) {
.Unspecified, .C, .Naked, .Async, .Inline => null,
@@ -7699,6 +7941,9 @@ fn funcCommon(
if (cc_workaround == .Inline and is_noinline) {
return sema.fail(block, cc_src, "'noinline' function cannot have callconv 'Inline'", .{});
}
+ if (is_generic and sema.no_partial_func_ty) return error.GenericPoison;
+ for (comptime_params) |ct| is_generic = is_generic or ct;
+ is_generic = is_generic or ret_ty_requires_comptime;
break :fn_ty try Type.Tag.function.create(sema.arena, .{
.param_types = param_types,
@@ -7760,11 +8005,6 @@ fn funcCommon(
break :blk if (sema.comptime_args.len == 0) null else sema.comptime_args.ptr;
} else null;
- const param_names = try sema.gpa.alloc([:0]const u8, block.params.items.len);
- for (param_names) |*param_name, i| {
- param_name.* = try sema.gpa.dupeZ(u8, block.params.items[i].name);
- }
-
const hash = new_func.hash;
const fn_payload = try sema.arena.create(Value.Payload.Function);
new_func.* = .{
@@ -7772,13 +8012,11 @@ fn funcCommon(
.zir_body_inst = func_inst,
.owner_decl = sema.owner_decl_index,
.comptime_args = comptime_args,
- .anytype_args = undefined,
.hash = hash,
.lbrace_line = src_locs.lbrace_line,
.rbrace_line = src_locs.rbrace_line,
.lbrace_column = @truncate(u16, src_locs.columns),
.rbrace_column = @truncate(u16, src_locs.columns >> 16),
- .param_names = param_names,
.branch_quota = default_branch_quota,
.is_noinline = is_noinline,
};
@@ -7796,30 +8034,20 @@ fn funcCommon(
fn analyzeParameter(
sema: *Sema,
block: *Block,
- func_src: LazySrcLoc,
param_src: LazySrcLoc,
param: Block.Param,
comptime_params: []bool,
i: usize,
is_generic: *bool,
- is_extern: bool,
cc: std.builtin.CallingConvention,
+ has_body: bool,
) !void {
const requires_comptime = try sema.typeRequiresComptime(block, param_src, param.ty);
comptime_params[i] = param.is_comptime or requires_comptime;
- const this_generic = comptime_params[i] or param.ty.tag() == .generic_poison;
+ const this_generic = param.ty.tag() == .generic_poison;
is_generic.* = is_generic.* or this_generic;
- if (is_extern and this_generic) {
- // TODO this check should exist somewhere for notes.
- if (param_src == .unneeded) return error.NeededSourceLocation;
- const msg = msg: {
- const msg = try sema.errMsg(block, func_src, "extern function cannot be generic", .{});
- errdefer msg.destroy(sema.gpa);
-
- try sema.errNote(block, param_src, msg, "function is generic because of this parameter", .{});
- break :msg msg;
- };
- return sema.failWithOwnedErrorMsg(msg);
+ if (param.is_comptime and !Type.fnCallingConventionAllowsZigTypes(cc)) {
+ return sema.fail(block, param_src, "comptime parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)});
}
if (this_generic and !Type.fnCallingConventionAllowsZigTypes(cc)) {
return sema.fail(block, param_src, "generic parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)});
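// Examples the two checks above reject (sketch):
// fn f(comptime x: u32) callconv(.C) void { _ = x; }
// // error: comptime parameters not allowed in function with calling convention 'C'
// fn g(x: anytype) callconv(.C) void { _ = x; }
// // error: generic parameters not allowed in function with calling convention 'C'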
@@ -7852,9 +8080,9 @@ fn analyzeParameter(
};
return sema.failWithOwnedErrorMsg(msg);
}
- if (requires_comptime and !param.is_comptime) {
+ if (!sema.is_generic_instantiation and requires_comptime and !param.is_comptime and has_body) {
const msg = msg: {
- const msg = try sema.errMsg(block, param_src, "parametter of type '{}' must be declared comptime", .{
+ const msg = try sema.errMsg(block, param_src, "parameter of type '{}' must be declared comptime", .{
param.ty.fmt(sema.mod),
});
errdefer msg.destroy(sema.gpa);
@@ -7885,25 +8113,19 @@ fn zirParam(
// Make sure any nested param instructions don't clobber our work.
const prev_params = block.params;
const prev_preallocated_new_func = sema.preallocated_new_func;
+ const prev_no_partial_func_type = sema.no_partial_func_ty;
block.params = .{};
sema.preallocated_new_func = null;
+ sema.no_partial_func_ty = true;
defer {
block.params.deinit(sema.gpa);
block.params = prev_params;
sema.preallocated_new_func = prev_preallocated_new_func;
+ sema.no_partial_func_ty = prev_no_partial_func_type;
}
if (sema.resolveBody(block, body, inst)) |param_ty_inst| {
if (sema.analyzeAsType(block, src, param_ty_inst)) |param_ty| {
- if (param_ty.zigTypeTag() == .Fn and param_ty.fnInfo().is_generic) {
- // zirFunc will not emit error.GenericPoison to build a
- // partial type for generic functions but we still need to
- // detect if a function parameter is a generic function
- // to force the parent function to also be generic.
- if (!sema.inst_map.contains(inst)) {
- break :err error.GenericPoison;
- }
- }
break :param_ty param_ty;
} else |err| break :err err;
} else |err| break :err err;
@@ -7952,7 +8174,7 @@ fn zirParam(
try block.params.append(sema.gpa, .{
.ty = param_ty,
- .is_comptime = is_comptime,
+ .is_comptime = comptime_syntax,
.name = param_name,
});
const result = try sema.addConstant(param_ty, Value.initTag(.generic_poison));
@@ -8638,13 +8860,11 @@ fn zirSwitchCapture(
switch (operand_ty.zigTypeTag()) {
.Union => {
const union_obj = operand_ty.cast(Type.Payload.Union).?.data;
- const enum_ty = union_obj.tag_ty;
-
const first_item = try sema.resolveInst(items[0]);
// Previous switch validation ensured this will succeed
const first_item_val = sema.resolveConstValue(block, .unneeded, first_item, undefined) catch unreachable;
- const first_field_index = @intCast(u32, enum_ty.enumTagFieldIndex(first_item_val, sema.mod).?);
+ const first_field_index = @intCast(u32, operand_ty.unionTagFieldIndex(first_item_val, sema.mod).?);
const first_field = union_obj.fields.values()[first_field_index];
for (items[1..]) |item, i| {
@@ -8652,7 +8872,7 @@ fn zirSwitchCapture(
// Previous switch validation ensured this will succeed
const item_val = sema.resolveConstValue(block, .unneeded, item_ref, undefined) catch unreachable;
- const field_index = enum_ty.enumTagFieldIndex(item_val, sema.mod).?;
+ const field_index = operand_ty.unionTagFieldIndex(item_val, sema.mod).?;
const field = union_obj.fields.values()[field_index];
if (!field.ty.eql(first_field.ty, sema.mod)) {
const msg = msg: {
@@ -8776,6 +8996,9 @@ fn zirSwitchCond(
.ErrorSet,
.Enum,
=> {
+ if (operand_ty.isSlice()) {
+ return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(sema.mod)});
+ }
if ((try sema.typeHasOnePossibleValue(block, operand_src, operand_ty))) |opv| {
return sema.addConstant(operand_ty, opv);
}
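
The new `isSlice` guard makes switching on a slice operand a compile error during Sema instead of letting it reach codegen. A sketch of the rejected pattern and the usual replacement, assuming `std.mem.eql`:

    fn category(s: []const u8) u32 {
        // switch (s) { ... } // error: switch on type '[]const u8'
        if (std.mem.eql(u8, s, "add")) return 1; // compare slices explicitly
        return 0;
    }
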
@@ -8852,12 +9075,17 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
},
};
- const union_originally = blk: {
+ const maybe_union_ty = blk: {
const zir_data = sema.code.instructions.items(.data);
const cond_index = Zir.refToIndex(extra.data.operand).?;
const raw_operand = sema.resolveInst(zir_data[cond_index].un_node.operand) catch unreachable;
- break :blk sema.typeOf(raw_operand).zigTypeTag() == .Union;
+ break :blk sema.typeOf(raw_operand);
};
+ const union_originally = maybe_union_ty.zigTypeTag() == .Union;
+ var seen_union_fields: []?Module.SwitchProngSrc = &.{};
+ defer gpa.free(seen_union_fields);
+
+ var empty_enum = false;
const operand_ty = sema.typeOf(operand);
@@ -8892,7 +9120,9 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
.Union => unreachable, // handled in zirSwitchCond
.Enum => {
var seen_fields = try gpa.alloc(?Module.SwitchProngSrc, operand_ty.enumFieldCount());
- defer gpa.free(seen_fields);
+ empty_enum = seen_fields.len == 0 and !operand_ty.isNonexhaustiveEnum();
+ defer if (!union_originally) gpa.free(seen_fields);
+ if (union_originally) seen_union_fields = seen_fields;
mem.set(?Module.SwitchProngSrc, seen_fields, null);
// This is used for non-exhaustive enum values that do not correspond to any tags.
@@ -9486,6 +9716,9 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
}
if (scalar_cases_len + multi_cases_len == 0) {
+ if (empty_enum) {
+ return Air.Inst.Ref.void_value;
+ }
if (special_prong == .none) {
return sema.fail(block, src, "switch must handle all possibilities", .{});
}
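
With `empty_enum`, a switch over an exhaustive enum that has no fields now lowers directly to `void` rather than demanding a prong or an else. A sketch:

    const Never = enum {};
    fn f(x: Never) void {
        switch (x) {} // legal: there are no possible values to handle
    }
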
@@ -9525,18 +9758,28 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const item = try sema.resolveInst(item_ref);
// `item` is already guaranteed to be constant known.
- _ = sema.analyzeBodyInner(&case_block, body) catch |err| switch (err) {
- error.ComptimeBreak => {
- const zir_datas = sema.code.instructions.items(.data);
- const break_data = zir_datas[sema.comptime_break_inst].@"break";
- try sema.addRuntimeBreak(&case_block, .{
- .block_inst = break_data.block_inst,
- .operand = break_data.operand,
- .inst = sema.comptime_break_inst,
- });
- },
- else => |e| return e,
- };
+ const analyze_body = if (union_originally) blk: {
+ const item_val = sema.resolveConstValue(block, .unneeded, item, undefined) catch unreachable;
+ const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod);
+ break :blk field_ty.zigTypeTag() != .NoReturn;
+ } else true;
+
+ if (analyze_body) {
+ _ = sema.analyzeBodyInner(&case_block, body) catch |err| switch (err) {
+ error.ComptimeBreak => {
+ const zir_datas = sema.code.instructions.items(.data);
+ const break_data = zir_datas[sema.comptime_break_inst].@"break";
+ try sema.addRuntimeBreak(&case_block, .{
+ .block_inst = break_data.block_inst,
+ .operand = break_data.operand,
+ .inst = sema.comptime_break_inst,
+ });
+ },
+ else => |e| return e,
+ };
+ } else {
+ _ = try case_block.addNoOp(.unreach);
+ }
try wip_captures.finalize();
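
The `analyze_body` computation skips semantic analysis of any prong whose union field has type `noreturn`, emitting a bare `unreach` terminator instead. Roughly the shape of code that benefits (hypothetical types):

    const U = union(enum) {
        ok: u32,
        never: noreturn, // no runtime value of U can carry this tag
    };
    fn get(u: U) u32 {
        return switch (u) {
            .ok => |x| x,
            .never => unreachable, // body not analyzed; lowered to `unreach`
        };
    }
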
@@ -9577,20 +9820,34 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
if (ranges_len == 0) {
cases_len += 1;
+ const analyze_body = if (union_originally)
+ for (items) |item_ref| {
+ const item = try sema.resolveInst(item_ref);
+ const item_val = sema.resolveConstValue(block, .unneeded, item, undefined) catch unreachable;
+ const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod);
+ if (field_ty.zigTypeTag() != .NoReturn) break true;
+ } else false
+ else
+ true;
+
const body = sema.code.extra[extra_index..][0..body_len];
extra_index += body_len;
- _ = sema.analyzeBodyInner(&case_block, body) catch |err| switch (err) {
- error.ComptimeBreak => {
- const zir_datas = sema.code.instructions.items(.data);
- const break_data = zir_datas[sema.comptime_break_inst].@"break";
- try sema.addRuntimeBreak(&case_block, .{
- .block_inst = break_data.block_inst,
- .operand = break_data.operand,
- .inst = sema.comptime_break_inst,
- });
- },
- else => |e| return e,
- };
+ if (analyze_body) {
+ _ = sema.analyzeBodyInner(&case_block, body) catch |err| switch (err) {
+ error.ComptimeBreak => {
+ const zir_datas = sema.code.instructions.items(.data);
+ const break_data = zir_datas[sema.comptime_break_inst].@"break";
+ try sema.addRuntimeBreak(&case_block, .{
+ .block_inst = break_data.block_inst,
+ .operand = break_data.operand,
+ .inst = sema.comptime_break_inst,
+ });
+ },
+ else => |e| return e,
+ };
+ } else {
+ _ = try case_block.addNoOp(.unreach);
+ }
try cases_extra.ensureUnusedCapacity(gpa, 2 + items.len +
case_block.instructions.items.len);
@@ -9705,14 +9962,24 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
}
var final_else_body: []const Air.Inst.Index = &.{};
- if (special.body.len != 0 or !is_first) {
+ if (special.body.len != 0 or !is_first or case_block.wantSafety()) {
var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, child_block.wip_capture_scope);
defer wip_captures.deinit();
case_block.instructions.shrinkRetainingCapacity(0);
case_block.wip_capture_scope = wip_captures.scope;
- if (special.body.len != 0) {
+ const analyze_body = if (union_originally)
+ for (seen_union_fields) |seen_field, index| {
+ if (seen_field != null) continue;
+ const union_obj = maybe_union_ty.cast(Type.Payload.Union).?.data;
+ const field_ty = union_obj.fields.values()[index].ty;
+ if (field_ty.zigTypeTag() != .NoReturn) break true;
+ } else false
+ else
+ true;
+
+ if (special.body.len != 0 and analyze_body) {
_ = sema.analyzeBodyInner(&case_block, special.body) catch |err| switch (err) {
error.ComptimeBreak => {
const zir_datas = sema.code.instructions.items(.data);
@@ -9728,9 +9995,11 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
} else {
// We still need a terminator in this block, but we have proven
// that it is unreachable.
- // TODO this should be a special safety panic other than unreachable, something
- // like "panic: switch operand had corrupt value not allowed by the type"
- try case_block.addUnreachable(src, true);
+ if (case_block.wantSafety()) {
+ _ = try sema.safetyPanic(&case_block, src, .corrupt_switch);
+ } else {
+ _ = try case_block.addNoOp(.unreach);
+ }
}
try wip_captures.finalize();
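
Safe builds now route the impossible else branch through `safetyPanic(..., .corrupt_switch)`, resolving the old TODO: a switch that observes a bit pattern outside the operand type's legal values gets a dedicated panic instead of a generic unreachable. One hypothetical way such a value could arise:

    const E = enum(u8) { a, b };
    // Hypothetical: `raw: u8` holds 0xAA, which is not a valid tag of E.
    const e = @ptrCast(*const E, &raw).*;
    switch (e) { // Debug/ReleaseSafe: panics via the new .corrupt_switch check
        .a => {},
        .b => {},
    }
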
@@ -10194,16 +10463,14 @@ fn zirShl(
const val = switch (air_tag) {
.shl_exact => val: {
- const shifted = try lhs_val.shl(rhs_val, lhs_ty, sema.arena, target);
+ const shifted = try lhs_val.shlWithOverflow(rhs_val, lhs_ty, sema.arena, target);
if (scalar_ty.zigTypeTag() == .ComptimeInt) {
- break :val shifted;
+ break :val shifted.wrapped_result;
}
- const int_info = scalar_ty.intInfo(target);
- const truncated = try shifted.intTrunc(lhs_ty, sema.arena, int_info.signedness, int_info.bits, target);
- if (try sema.compare(block, src, truncated, .eq, shifted, lhs_ty)) {
- break :val shifted;
+ if (shifted.overflowed.compareWithZero(.eq)) {
+ break :val shifted.wrapped_result;
}
- return sema.addConstUndef(lhs_ty);
+ return sema.fail(block, src, "operation caused overflow", .{});
},
.shl_sat => if (scalar_ty.zigTypeTag() == .ComptimeInt)
@@ -10239,34 +10506,57 @@ fn zirShl(
} else rhs;
try sema.requireRuntimeBlock(block, src, runtime_src);
- if (block.wantSafety() and air_tag == .shl_exact) {
- const op_ov_tuple_ty = try sema.overflowArithmeticTupleType(lhs_ty);
- const op_ov = try block.addInst(.{
- .tag = .shl_with_overflow,
- .data = .{ .ty_pl = .{
- .ty = try sema.addType(op_ov_tuple_ty),
- .payload = try sema.addExtra(Air.Bin{
- .lhs = lhs,
- .rhs = rhs,
- }),
- } },
- });
- const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty);
- const any_ov_bit = if (lhs_ty.zigTypeTag() == .Vector)
- try block.addInst(.{
- .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
- .data = .{ .reduce = .{
- .operand = ov_bit,
- .operation = .Or,
- } },
- })
- else
- ov_bit;
- const zero_ov = try sema.addConstant(Type.@"u1", Value.zero);
- const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov);
- try sema.addSafetyCheck(block, no_ov, .shl_overflow);
- return sema.tupleFieldValByIndex(block, src, op_ov, 0, op_ov_tuple_ty);
+ if (block.wantSafety()) {
+ const bit_count = scalar_ty.intInfo(target).bits;
+ if (!std.math.isPowerOfTwo(bit_count)) {
+ const bit_count_val = try Value.Tag.int_u64.create(sema.arena, bit_count);
+ const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: {
+ const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val));
+ const lt = try block.addCmpVector(rhs, bit_count_inst, .lt, try sema.addType(rhs_ty));
+ break :ok try block.addInst(.{
+ .tag = .reduce,
+ .data = .{ .reduce = .{
+ .operand = lt,
+ .operation = .And,
+ } },
+ });
+ } else ok: {
+ const bit_count_inst = try sema.addConstant(rhs_ty, bit_count_val);
+ break :ok try block.addBinOp(.cmp_lt, rhs, bit_count_inst);
+ };
+ try sema.addSafetyCheck(block, ok, .shift_rhs_too_big);
+ }
+
+ if (air_tag == .shl_exact) {
+ const op_ov_tuple_ty = try sema.overflowArithmeticTupleType(lhs_ty);
+ const op_ov = try block.addInst(.{
+ .tag = .shl_with_overflow,
+ .data = .{ .ty_pl = .{
+ .ty = try sema.addType(op_ov_tuple_ty),
+ .payload = try sema.addExtra(Air.Bin{
+ .lhs = lhs,
+ .rhs = rhs,
+ }),
+ } },
+ });
+ const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty);
+ const any_ov_bit = if (lhs_ty.zigTypeTag() == .Vector)
+ try block.addInst(.{
+ .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
+ .data = .{ .reduce = .{
+ .operand = ov_bit,
+ .operation = .Or,
+ } },
+ })
+ else
+ ov_bit;
+ const zero_ov = try sema.addConstant(Type.@"u1", Value.zero);
+ const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov);
+
+ try sema.addSafetyCheck(block, no_ov, .shl_overflow);
+ return sema.tupleFieldValByIndex(block, src, op_ov, 0, op_ov_tuple_ty);
+ }
}
return block.addBinOp(air_tag, lhs, new_rhs);
}
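
Two behavioral changes land in `zirShl`. At comptime, an overflowing `@shlExact` is now a hard error rather than silently producing `undefined`; at runtime, every shift on an integer whose bit count is not a power of two gains a `shift_rhs_too_big` safety check on the shift amount (for power-of-two bit counts the `Log2T` operand type already rules that out). Comptime sketch of the first change:

    comptime {
        _ = @shlExact(@as(u8, 0x80), 1); // error: operation caused overflow
    }
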
@@ -10333,7 +10623,7 @@ fn zirShr(
// Detect if any ones would be shifted out.
const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val, target);
if (!(try truncated.compareWithZeroAdvanced(.eq, sema.kit(block, src)))) {
- return sema.addConstUndef(lhs_ty);
+ return sema.fail(block, src, "exact shift shifted out 1 bits", .{});
}
}
const val = try lhs_val.shr(rhs_val, lhs_ty, sema.arena, target);
@@ -10345,20 +10635,43 @@ fn zirShr(
try sema.requireRuntimeBlock(block, src, runtime_src);
const result = try block.addBinOp(air_tag, lhs, rhs);
- if (block.wantSafety() and air_tag == .shr_exact) {
- const back = try block.addBinOp(.shl, result, rhs);
- const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: {
- const eql = try block.addCmpVector(lhs, back, .eq, try sema.addType(rhs_ty));
- break :ok try block.addInst(.{
- .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
- .data = .{ .reduce = .{
- .operand = eql,
- .operation = .And,
- } },
- });
- } else try block.addBinOp(.cmp_eq, lhs, back);
- try sema.addSafetyCheck(block, ok, .shr_overflow);
+ if (block.wantSafety()) {
+ const bit_count = scalar_ty.intInfo(target).bits;
+ if (!std.math.isPowerOfTwo(bit_count)) {
+ const bit_count_val = try Value.Tag.int_u64.create(sema.arena, bit_count);
+ const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: {
+ const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val));
+ const lt = try block.addCmpVector(rhs, bit_count_inst, .lt, try sema.addType(rhs_ty));
+ break :ok try block.addInst(.{
+ .tag = .reduce,
+ .data = .{ .reduce = .{
+ .operand = lt,
+ .operation = .And,
+ } },
+ });
+ } else ok: {
+ const bit_count_inst = try sema.addConstant(rhs_ty, bit_count_val);
+ break :ok try block.addBinOp(.cmp_lt, rhs, bit_count_inst);
+ };
+ try sema.addSafetyCheck(block, ok, .shift_rhs_too_big);
+ }
+
+ if (air_tag == .shr_exact) {
+ const back = try block.addBinOp(.shl, result, rhs);
+
+ const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: {
+ const eql = try block.addCmpVector(lhs, back, .eq, try sema.addType(rhs_ty));
+ break :ok try block.addInst(.{
+ .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
+ .data = .{ .reduce = .{
+ .operand = eql,
+ .operation = .And,
+ } },
+ });
+ } else try block.addBinOp(.cmp_eq, lhs, back);
+ try sema.addSafetyCheck(block, ok, .shr_overflow);
+ }
}
return result;
}
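
`zirShr` changes symmetrically: a comptime `@shrExact` that discards set bits is now a compile error instead of yielding `undefined`, and the same `shift_rhs_too_big` runtime guard is added for non-power-of-two bit counts. Sketch:

    comptime {
        _ = @shrExact(@as(u8, 0b0101), 1); // error: exact shift shifted out 1 bits
    }
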
@@ -11040,6 +11353,21 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(block, lhs_src, casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(block, rhs_src, casted_rhs);
+ if ((lhs_ty.zigTypeTag() == .ComptimeFloat and rhs_ty.zigTypeTag() == .ComptimeInt) or
+ (lhs_ty.zigTypeTag() == .ComptimeInt and rhs_ty.zigTypeTag() == .ComptimeFloat))
+ {
+ // If it makes a difference whether we coerce to ints or floats before doing the division, error.
+ // If lhs % rhs is 0, it doesn't matter.
+ const lhs_val = maybe_lhs_val orelse unreachable;
+ const rhs_val = maybe_rhs_val orelse unreachable;
+ const rem = lhs_val.floatRem(rhs_val, resolved_type, sema.arena, target) catch unreachable;
+ if (rem.compareWithZero(.neq)) {
+ return sema.fail(block, src, "ambiguous coercion of division operands '{s}' and '{s}'; non-zero remainder '{}'", .{
+ @tagName(lhs_ty.tag()), @tagName(rhs_ty.tag()), rem.fmtValue(resolved_type, sema.mod),
+ });
+ }
+ }
+
// TODO: emit compile error when .div is used on integers and there would be an
// ambiguous result between div_floor and div_trunc.
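
The new check fires only when one operand is a `comptime_float` and the other a `comptime_int` and the two coercion choices would disagree, which is exactly when the remainder is non-zero. Sketch:

    comptime {
        _ = 8.0 / 2; // OK: remainder is zero, float and int division agree (4)
        _ = 7.0 / 2; // error: ambiguous coercion of division operands ...
    }
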
@@ -11130,7 +11458,12 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
try sema.addDivByZeroSafety(block, resolved_type, maybe_rhs_val, casted_rhs, is_int);
}
- const air_tag = if (is_int) Air.Inst.Tag.div_trunc else switch (block.float_mode) {
+ const air_tag = if (is_int) blk: {
+ if (lhs_ty.isSignedInt() or rhs_ty.isSignedInt()) {
+ return sema.fail(block, src, "division with '{s}' and '{s}': signed integers must use @divTrunc, @divFloor, or @divExact", .{ @tagName(lhs_ty.tag()), @tagName(rhs_ty.tag()) });
+ }
+ break :blk Air.Inst.Tag.div_trunc;
+ } else switch (block.float_mode) {
.Optimized => Air.Inst.Tag.div_float_optimized,
.Strict => Air.Inst.Tag.div_float,
};
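
Runtime `/` on signed integers is rejected because truncating and flooring division disagree for negative operands, so the caller must pick one. Sketch:

    fn quotient(a: i32, b: i32) i32 {
        // return a / b; // error: signed integers must use @divTrunc, @divFloor, or @divExact
        return @divTrunc(a, b);
    }
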
@@ -11210,13 +11543,19 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (maybe_lhs_val) |lhs_val| {
if (maybe_rhs_val) |rhs_val| {
if (is_int) {
- // TODO: emit compile error if there is a remainder
+ const modulus_val = try lhs_val.intMod(rhs_val, resolved_type, sema.arena, target);
+ if (modulus_val.compareWithZero(.neq)) {
+ return sema.fail(block, src, "exact division produced remainder", .{});
+ }
return sema.addConstant(
resolved_type,
try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, target),
);
} else {
- // TODO: emit compile error if there is a remainder
+ const modulus_val = try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, target);
+ if (modulus_val.compareWithZero(.neq)) {
+ return sema.fail(block, src, "exact division produced remainder", .{});
+ }
return sema.addConstant(
resolved_type,
try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, target),
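
Both TODOs are resolved: a comptime `@divExact` with a non-zero remainder is now a compile error for integer and float operands alike. Sketch:

    comptime {
        _ = @divExact(10, 2); // OK: 5
        _ = @divExact(7, 2);  // error: exact division produced remainder
    }
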
@@ -11634,6 +11973,395 @@ fn airTag(block: *Block, is_int: bool, normal: Air.Inst.Tag, optimized: Air.Inst
};
}
+fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
+ const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
+ const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
+ const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
+ const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
+ const lhs = try sema.resolveInst(extra.lhs);
+ const rhs = try sema.resolveInst(extra.rhs);
+ const lhs_ty = sema.typeOf(lhs);
+ const rhs_ty = sema.typeOf(rhs);
+ const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
+ const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+ try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
+ try sema.checkInvalidPtrArithmetic(block, src, lhs_ty, .mod_rem);
+
+ const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
+ const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
+ .override = &[_]LazySrcLoc{ lhs_src, rhs_src },
+ });
+
+ const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
+ const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
+
+ const lhs_scalar_ty = lhs_ty.scalarType();
+ const rhs_scalar_ty = rhs_ty.scalarType();
+ const scalar_tag = resolved_type.scalarType().zigTypeTag();
+
+ const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
+
+ try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .mod_rem);
+
+ const mod = sema.mod;
+ const target = mod.getTarget();
+ const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(block, lhs_src, casted_lhs);
+ const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(block, rhs_src, casted_rhs);
+
+ const runtime_src = rs: {
+ // For integers:
+ // Either operand being undef is a compile error because there exists
+ // a possible value (TODO what is it?) that would invoke illegal behavior.
+ // TODO: can lhs undef be handled better?
+ //
+ // For floats:
+ // If the rhs is zero, compile error for division by zero.
+ // If the rhs is undefined, compile error because there is a possible
+ // value (zero) for which the division would be illegal behavior.
+ // If the lhs is undefined, result is undefined.
+ //
+ // For either one: if the result would be different between @mod and @rem,
+ // then emit a compile error saying you have to pick one.
+ if (is_int) {
+ if (maybe_lhs_val) |lhs_val| {
+ if (lhs_val.isUndef()) {
+ return sema.failWithUseOfUndef(block, lhs_src);
+ }
+ if (try lhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
+ return sema.addConstant(resolved_type, Value.zero);
+ }
+ } else if (lhs_scalar_ty.isSignedInt()) {
+ return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
+ }
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.isUndef()) {
+ return sema.failWithUseOfUndef(block, rhs_src);
+ }
+ if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
+ return sema.failWithDivideByZero(block, rhs_src);
+ }
+ if (maybe_lhs_val) |lhs_val| {
+ const rem_result = try sema.intRem(block, resolved_type, lhs_val, lhs_src, rhs_val, rhs_src);
+ // If this answer could possibly be different by doing `intMod`,
+ // we must emit a compile error. Otherwise, it's OK.
+ if ((try rhs_val.compareWithZeroAdvanced(.lt, sema.kit(block, src))) != (try lhs_val.compareWithZeroAdvanced(.lt, sema.kit(block, src))) and
+ !(try rem_result.compareWithZeroAdvanced(.eq, sema.kit(block, src))))
+ {
+ const bad_src = if (try lhs_val.compareWithZeroAdvanced(.lt, sema.kit(block, src)))
+ lhs_src
+ else
+ rhs_src;
+ return sema.failWithModRemNegative(block, bad_src, lhs_ty, rhs_ty);
+ }
+ if (try lhs_val.compareWithZeroAdvanced(.lt, sema.kit(block, src))) {
+ // Negative
+ return sema.addConstant(resolved_type, Value.zero);
+ }
+ return sema.addConstant(resolved_type, rem_result);
+ }
+ break :rs lhs_src;
+ } else if (rhs_scalar_ty.isSignedInt()) {
+ return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
+ } else {
+ break :rs rhs_src;
+ }
+ }
+ // float operands
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.isUndef()) {
+ return sema.failWithUseOfUndef(block, rhs_src);
+ }
+ if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
+ return sema.failWithDivideByZero(block, rhs_src);
+ }
+ if (try rhs_val.compareWithZeroAdvanced(.lt, sema.kit(block, src))) {
+ return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
+ }
+ if (maybe_lhs_val) |lhs_val| {
+ if (lhs_val.isUndef() or (try lhs_val.compareWithZeroAdvanced(.lt, sema.kit(block, src)))) {
+ return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
+ }
+ return sema.addConstant(
+ resolved_type,
+ try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, target),
+ );
+ } else {
+ return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
+ }
+ } else {
+ return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
+ }
+ };
+
+ try sema.requireRuntimeBlock(block, src, runtime_src);
+
+ if (block.wantSafety()) {
+ try sema.addDivByZeroSafety(block, resolved_type, maybe_rhs_val, casted_rhs, is_int);
+ }
+
+ const air_tag = airTag(block, is_int, .rem, .rem_optimized);
+ return block.addBinOp(air_tag, casted_lhs, casted_rhs);
+}
+
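
`zirModRem` implements the bare `%` operator, which is only defined where `@mod` and `@rem` agree; an operand whose sign is not comptime-known forces an explicit choice. Sketch (error wording approximate):

    fn f(a: i32, b: i32) i32 {
        // return a % b; // error: remainder division with 'i32' and 'i32' ...
        return @rem(a, b); // choose remainder (or @mod) explicitly
    }
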
+fn intRem(
+ sema: *Sema,
+ block: *Block,
+ ty: Type,
+ lhs: Value,
+ lhs_src: LazySrcLoc,
+ rhs: Value,
+ rhs_src: LazySrcLoc,
+) CompileError!Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try sema.arena.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try sema.intRemScalar(block, lhs.indexVectorlike(i), lhs_src, rhs.indexVectorlike(i), rhs_src);
+ }
+ return Value.Tag.aggregate.create(sema.arena, result_data);
+ }
+ return sema.intRemScalar(block, lhs, lhs_src, rhs, rhs_src);
+}
+
+fn intRemScalar(
+ sema: *Sema,
+ block: *Block,
+ lhs: Value,
+ lhs_src: LazySrcLoc,
+ rhs: Value,
+ rhs_src: LazySrcLoc,
+) CompileError!Value {
+ const target = sema.mod.getTarget();
+ // TODO is this a performance issue? maybe we should try the operation without
+ // resorting to BigInt first.
+ var lhs_space: Value.BigIntSpace = undefined;
+ var rhs_space: Value.BigIntSpace = undefined;
+ const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema.kit(block, lhs_src));
+ const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema.kit(block, rhs_src));
+ const limbs_q = try sema.arena.alloc(
+ math.big.Limb,
+ lhs_bigint.limbs.len,
+ );
+ const limbs_r = try sema.arena.alloc(
+ math.big.Limb,
+ // TODO: consider reworking Sema to re-use Values rather than
+ // always producing new Value objects.
+ rhs_bigint.limbs.len,
+ );
+ const limbs_buffer = try sema.arena.alloc(
+ math.big.Limb,
+ math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
+ );
+ var result_q = math.big.int.Mutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
+ var result_r = math.big.int.Mutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
+ result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
+ return Value.fromBigInt(sema.arena, result_r.toConst());
+}
+
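
`intRemScalar` computes the remainder via big-integer `divTrunc`, which is what distinguishes `@rem` from `@mod` for negative numerators. Worked values as a comptime sketch:

    comptime {
        if (@rem(-7, 3) != -1) unreachable; // remainder follows the dividend's sign
        if (@mod(-7, 3) != 2) unreachable;  // modulus follows the divisor's sign
    }
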
+fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
+ const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
+ const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
+ const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
+ const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
+ const lhs = try sema.resolveInst(extra.lhs);
+ const rhs = try sema.resolveInst(extra.rhs);
+ const lhs_ty = sema.typeOf(lhs);
+ const rhs_ty = sema.typeOf(rhs);
+ const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
+ const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+ try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
+ try sema.checkInvalidPtrArithmetic(block, src, lhs_ty, .mod);
+
+ const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
+ const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
+ .override = &[_]LazySrcLoc{ lhs_src, rhs_src },
+ });
+
+ const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
+ const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
+
+ const scalar_tag = resolved_type.scalarType().zigTypeTag();
+
+ const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
+
+ try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .mod);
+
+ const mod = sema.mod;
+ const target = mod.getTarget();
+ const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(block, lhs_src, casted_lhs);
+ const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(block, rhs_src, casted_rhs);
+
+ const runtime_src = rs: {
+ // For integers:
+ // Either operand being undef is a compile error because there exists
+ // a possible value (TODO what is it?) that would invoke illegal behavior.
+ // TODO: can lhs zero be handled better?
+ // TODO: can lhs undef be handled better?
+ //
+ // For floats:
+ // If the rhs is zero, compile error for division by zero.
+ // If the rhs is undefined, compile error because there is a possible
+ // value (zero) for which the division would be illegal behavior.
+ // If the lhs is undefined, result is undefined.
+ if (is_int) {
+ if (maybe_lhs_val) |lhs_val| {
+ if (lhs_val.isUndef()) {
+ return sema.failWithUseOfUndef(block, lhs_src);
+ }
+ }
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.isUndef()) {
+ return sema.failWithUseOfUndef(block, rhs_src);
+ }
+ if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
+ return sema.failWithDivideByZero(block, rhs_src);
+ }
+ if (maybe_lhs_val) |lhs_val| {
+ return sema.addConstant(
+ resolved_type,
+ try lhs_val.intMod(rhs_val, resolved_type, sema.arena, target),
+ );
+ }
+ break :rs lhs_src;
+ } else {
+ break :rs rhs_src;
+ }
+ }
+ // float operands
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.isUndef()) {
+ return sema.failWithUseOfUndef(block, rhs_src);
+ }
+ if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
+ return sema.failWithDivideByZero(block, rhs_src);
+ }
+ }
+ if (maybe_lhs_val) |lhs_val| {
+ if (lhs_val.isUndef()) {
+ return sema.addConstUndef(resolved_type);
+ }
+ if (maybe_rhs_val) |rhs_val| {
+ return sema.addConstant(
+ resolved_type,
+ try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, target),
+ );
+ } else break :rs rhs_src;
+ } else break :rs lhs_src;
+ };
+
+ try sema.requireRuntimeBlock(block, src, runtime_src);
+
+ if (block.wantSafety()) {
+ try sema.addDivByZeroSafety(block, resolved_type, maybe_rhs_val, casted_rhs, is_int);
+ }
+
+ const air_tag = airTag(block, is_int, .mod, .mod_optimized);
+ return block.addBinOp(air_tag, casted_lhs, casted_rhs);
+}
+
+fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
+ const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
+ const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
+ const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
+ const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
+ const lhs = try sema.resolveInst(extra.lhs);
+ const rhs = try sema.resolveInst(extra.rhs);
+ const lhs_ty = sema.typeOf(lhs);
+ const rhs_ty = sema.typeOf(rhs);
+ const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
+ const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+ try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
+ try sema.checkInvalidPtrArithmetic(block, src, lhs_ty, .rem);
+
+ const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
+ const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
+ .override = &[_]LazySrcLoc{ lhs_src, rhs_src },
+ });
+
+ const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
+ const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
+
+ const scalar_tag = resolved_type.scalarType().zigTypeTag();
+
+ const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
+
+ try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .rem);
+
+ const mod = sema.mod;
+ const target = mod.getTarget();
+ const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(block, lhs_src, casted_lhs);
+ const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(block, rhs_src, casted_rhs);
+
+ const runtime_src = rs: {
+ // For integers:
+ // Either operand being undef is a compile error because there exists
+ // a possible value (TODO what is it?) that would invoke illegal behavior.
+ // TODO: can lhs zero be handled better?
+ // TODO: can lhs undef be handled better?
+ //
+ // For floats:
+ // If the rhs is zero, compile error for division by zero.
+ // If the rhs is undefined, compile error because there is a possible
+ // value (zero) for which the division would be illegal behavior.
+ // If the lhs is undefined, result is undefined.
+ if (is_int) {
+ if (maybe_lhs_val) |lhs_val| {
+ if (lhs_val.isUndef()) {
+ return sema.failWithUseOfUndef(block, lhs_src);
+ }
+ }
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.isUndef()) {
+ return sema.failWithUseOfUndef(block, rhs_src);
+ }
+ if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
+ return sema.failWithDivideByZero(block, rhs_src);
+ }
+ if (maybe_lhs_val) |lhs_val| {
+ return sema.addConstant(
+ resolved_type,
+ try sema.intRem(block, resolved_type, lhs_val, lhs_src, rhs_val, rhs_src),
+ );
+ }
+ break :rs lhs_src;
+ } else {
+ break :rs rhs_src;
+ }
+ }
+ // float operands
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.isUndef()) {
+ return sema.failWithUseOfUndef(block, rhs_src);
+ }
+ if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
+ return sema.failWithDivideByZero(block, rhs_src);
+ }
+ }
+ if (maybe_lhs_val) |lhs_val| {
+ if (lhs_val.isUndef()) {
+ return sema.addConstUndef(resolved_type);
+ }
+ if (maybe_rhs_val) |rhs_val| {
+ return sema.addConstant(
+ resolved_type,
+ try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, target),
+ );
+ } else break :rs rhs_src;
+ } else break :rs lhs_src;
+ };
+
+ try sema.requireRuntimeBlock(block, src, runtime_src);
+
+ if (block.wantSafety()) {
+ try sema.addDivByZeroSafety(block, resolved_type, maybe_rhs_val, casted_rhs, is_int);
+ }
+
+ const air_tag = airTag(block, is_int, .rem, .rem_optimized);
+ return block.addBinOp(air_tag, casted_lhs, casted_rhs);
+}
+
fn zirOverflowArithmetic(
sema: *Sema,
block: *Block,
@@ -11895,8 +12623,6 @@ fn analyzeArithmetic(
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
- const lhs_scalar_ty = lhs_ty.scalarType();
- const rhs_scalar_ty = rhs_ty.scalarType();
const scalar_tag = resolved_type.scalarType().zigTypeTag();
const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
@@ -12242,206 +12968,6 @@ fn analyzeArithmetic(
} else break :rs .{ .src = lhs_src, .air_tag = .mul_sat };
} else break :rs .{ .src = rhs_src, .air_tag = .mul_sat };
},
- .mod_rem => {
- // For integers:
- // Either operand being undef is a compile error because there exists
- // a possible value (TODO what is it?) that would invoke illegal behavior.
- // TODO: can lhs undef be handled better?
- //
- // For floats:
- // If the rhs is zero, compile error for division by zero.
- // If the rhs is undefined, compile error because there is a possible
- // value (zero) for which the division would be illegal behavior.
- // If the lhs is undefined, result is undefined.
- //
- // For either one: if the result would be different between @mod and @rem,
- // then emit a compile error saying you have to pick one.
- if (is_int) {
- if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef()) {
- return sema.failWithUseOfUndef(block, lhs_src);
- }
- if (try lhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
- return sema.addConstant(resolved_type, Value.zero);
- }
- } else if (lhs_scalar_ty.isSignedInt()) {
- return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
- }
- if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
- return sema.failWithUseOfUndef(block, rhs_src);
- }
- if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
- return sema.failWithDivideByZero(block, rhs_src);
- }
- if (maybe_lhs_val) |lhs_val| {
- const rem_result = try lhs_val.intRem(rhs_val, resolved_type, sema.arena, target);
- // If this answer could possibly be different by doing `intMod`,
- // we must emit a compile error. Otherwise, it's OK.
- if ((try rhs_val.compareWithZeroAdvanced(.lt, sema.kit(block, src))) != (try lhs_val.compareWithZeroAdvanced(.lt, sema.kit(block, src))) and
- !(try rem_result.compareWithZeroAdvanced(.eq, sema.kit(block, src))))
- {
- const bad_src = if (try lhs_val.compareWithZeroAdvanced(.lt, sema.kit(block, src)))
- lhs_src
- else
- rhs_src;
- return sema.failWithModRemNegative(block, bad_src, lhs_ty, rhs_ty);
- }
- if (try lhs_val.compareWithZeroAdvanced(.lt, sema.kit(block, src))) {
- // Negative
- return sema.addConstant(resolved_type, Value.zero);
- }
- return sema.addConstant(resolved_type, rem_result);
- }
- break :rs .{ .src = lhs_src, .air_tag = .rem };
- } else if (rhs_scalar_ty.isSignedInt()) {
- return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
- } else {
- break :rs .{ .src = rhs_src, .air_tag = .rem };
- }
- }
- // float operands
- if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
- return sema.failWithUseOfUndef(block, rhs_src);
- }
- if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
- return sema.failWithDivideByZero(block, rhs_src);
- }
- if (try rhs_val.compareWithZeroAdvanced(.lt, sema.kit(block, src))) {
- return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
- }
- if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef() or (try lhs_val.compareWithZeroAdvanced(.lt, sema.kit(block, src)))) {
- return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
- }
- return sema.addConstant(
- resolved_type,
- try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, target),
- );
- } else {
- return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
- }
- } else {
- return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
- }
- },
- .rem => {
- // For integers:
- // Either operand being undef is a compile error because there exists
- // a possible value (TODO what is it?) that would invoke illegal behavior.
- // TODO: can lhs zero be handled better?
- // TODO: can lhs undef be handled better?
- //
- // For floats:
- // If the rhs is zero, compile error for division by zero.
- // If the rhs is undefined, compile error because there is a possible
- // value (zero) for which the division would be illegal behavior.
- // If the lhs is undefined, result is undefined.
- if (is_int) {
- if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef()) {
- return sema.failWithUseOfUndef(block, lhs_src);
- }
- }
- if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
- return sema.failWithUseOfUndef(block, rhs_src);
- }
- if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
- return sema.failWithDivideByZero(block, rhs_src);
- }
- if (maybe_lhs_val) |lhs_val| {
- return sema.addConstant(
- resolved_type,
- try lhs_val.intRem(rhs_val, resolved_type, sema.arena, target),
- );
- }
- break :rs .{ .src = lhs_src, .air_tag = .rem };
- } else {
- break :rs .{ .src = rhs_src, .air_tag = .rem };
- }
- }
- // float operands
- if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
- return sema.failWithUseOfUndef(block, rhs_src);
- }
- if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
- return sema.failWithDivideByZero(block, rhs_src);
- }
- }
- const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .rem_optimized else .rem;
- if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef()) {
- return sema.addConstUndef(resolved_type);
- }
- if (maybe_rhs_val) |rhs_val| {
- return sema.addConstant(
- resolved_type,
- try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, target),
- );
- } else break :rs .{ .src = rhs_src, .air_tag = air_tag };
- } else break :rs .{ .src = lhs_src, .air_tag = air_tag };
- },
- .mod => {
- // For integers:
- // Either operand being undef is a compile error because there exists
- // a possible value (TODO what is it?) that would invoke illegal behavior.
- // TODO: can lhs zero be handled better?
- // TODO: can lhs undef be handled better?
- //
- // For floats:
- // If the rhs is zero, compile error for division by zero.
- // If the rhs is undefined, compile error because there is a possible
- // value (zero) for which the division would be illegal behavior.
- // If the lhs is undefined, result is undefined.
- if (is_int) {
- if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef()) {
- return sema.failWithUseOfUndef(block, lhs_src);
- }
- }
- if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
- return sema.failWithUseOfUndef(block, rhs_src);
- }
- if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
- return sema.failWithDivideByZero(block, rhs_src);
- }
- if (maybe_lhs_val) |lhs_val| {
- return sema.addConstant(
- resolved_type,
- try lhs_val.intMod(rhs_val, resolved_type, sema.arena, target),
- );
- }
- break :rs .{ .src = lhs_src, .air_tag = .mod };
- } else {
- break :rs .{ .src = rhs_src, .air_tag = .mod };
- }
- }
- // float operands
- if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
- return sema.failWithUseOfUndef(block, rhs_src);
- }
- if (try rhs_val.compareWithZeroAdvanced(.eq, sema.kit(block, src))) {
- return sema.failWithDivideByZero(block, rhs_src);
- }
- }
- const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .mod_optimized else .mod;
- if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef()) {
- return sema.addConstUndef(resolved_type);
- }
- if (maybe_rhs_val) |rhs_val| {
- return sema.addConstant(
- resolved_type,
- try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, target),
- );
- } else break :rs .{ .src = rhs_src, .air_tag = air_tag };
- } else break :rs .{ .src = lhs_src, .air_tag = air_tag };
- },
else => unreachable,
}
};
@@ -12485,33 +13011,6 @@ fn analyzeArithmetic(
return sema.tupleFieldValByIndex(block, src, op_ov, 0, op_ov_tuple_ty);
}
}
- switch (rs.air_tag) {
- .rem, .mod, .rem_optimized, .mod_optimized => {
- const ok = if (resolved_type.zigTypeTag() == .Vector) ok: {
- const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero);
- const zero = try sema.addConstant(sema.typeOf(casted_rhs), zero_val);
- const ok = try block.addCmpVector(casted_rhs, zero, if (scalar_tag == .Int) .gt else .neq, try sema.addType(resolved_type));
- break :ok try block.addInst(.{
- .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
- .data = .{ .reduce = .{
- .operand = ok,
- .operation = .And,
- } },
- });
- } else ok: {
- const zero = try sema.addConstant(sema.typeOf(casted_rhs), Value.zero);
- const air_tag = if (scalar_tag == .Int)
- Air.Inst.Tag.cmp_gt
- else if (block.float_mode == .Optimized)
- Air.Inst.Tag.cmp_neq_optimized
- else
- Air.Inst.Tag.cmp_neq;
- break :ok try block.addBinOp(air_tag, casted_rhs, zero);
- };
- try sema.addSafetyCheck(block, ok, .remainder_division_zero_negative);
- },
- else => {},
- }
}
return block.addBinOp(rs.air_tag, casted_lhs, casted_rhs);
}
@@ -12557,7 +13056,7 @@ fn analyzePtrArithmetic(
// The resulting pointer is aligned to the lcd between the offset (an
// arbitrary number) and the alignment factor (always a power of two,
// non zero).
- const new_align = @as(u32, 1) << @intCast(u5, @ctz(u64, addend | ptr_info.@"align"));
+ const new_align = @as(u32, 1) << @intCast(u5, @ctz(addend | ptr_info.@"align"));
break :t try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = ptr_info.pointee_type,
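
Incidental update for the concurrent language change that dropped the explicit type argument from `@ctz` (and `@clz`/`@popCount`); the operand type is now inferred:

    const x: u64 = 40; // 0b101000
    const tz = @ctz(x); // previously @ctz(u64, x); result is 3
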
@@ -12896,6 +13395,14 @@ fn analyzeCmpUnionTag(
const coerced_tag = try sema.coerce(block, union_tag_ty, tag, tag_src);
const coerced_union = try sema.coerce(block, union_tag_ty, un, un_src);
+ if (try sema.resolveMaybeUndefVal(block, tag_src, coerced_tag)) |enum_val| {
+ if (enum_val.isUndef()) return sema.addConstUndef(Type.bool);
+ const field_ty = union_ty.unionFieldType(enum_val, sema.mod);
+ if (field_ty.zigTypeTag() == .NoReturn) {
+ return Air.Inst.Ref.bool_false;
+ }
+ }
+
return sema.cmpSelf(block, src, coerced_union, coerced_tag, op, un_src, tag_src);
}
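
With the early-out above, comparing a union against a tag whose field type is `noreturn` folds to `false` at comptime, since no runtime value can carry that tag. Sketch:

    const U = union(enum) { ok: u32, never: noreturn };
    fn isNever(u: U) bool {
        return u == .never; // always false; no runtime compare is emitted
    }
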
@@ -13961,10 +14468,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
else
field.default_val;
const default_val_ptr = try sema.optRefValue(block, src, field.ty, opt_default_val);
- const alignment = switch (layout) {
- .Auto, .Extern => field.normalAlignment(target),
- .Packed => 0,
- };
+ const alignment = field.alignment(target, layout);
struct_field_fields.* = .{
// name: []const u8,
@@ -14003,13 +14507,27 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, struct_ty.getNamespace());
- const field_values = try sema.arena.create([4]Value);
+ const backing_integer_val = blk: {
+ if (layout == .Packed) {
+ const struct_obj = struct_ty.castTag(.@"struct").?.data;
+ assert(struct_obj.haveLayout());
+ assert(struct_obj.backing_int_ty.isInt());
+ const backing_int_ty_val = try Value.Tag.ty.create(sema.arena, struct_obj.backing_int_ty);
+ break :blk try Value.Tag.opt_payload.create(sema.arena, backing_int_ty_val);
+ } else {
+ break :blk Value.initTag(.null_value);
+ }
+ };
+
+ const field_values = try sema.arena.create([5]Value);
field_values.* = .{
// layout: ContainerLayout,
try Value.Tag.enum_field_index.create(
sema.arena,
@enumToInt(layout),
),
+ // backing_integer: ?type,
+ backing_integer_val,
// fields: []const StructField,
fields_val,
// decls: []const Declaration,
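
`std.builtin.Type.Struct` grows a `backing_integer: ?type` field, populated for packed structs and `null` otherwise. A sketch of observing it, assuming the default backing integer is the smallest unsigned integer covering the fields:

    const S = packed struct { a: u4, b: u4 };
    const P = struct { x: u8 };
    comptime {
        if (@typeInfo(S).Struct.backing_integer.? != u8) unreachable; // 8 field bits
        if (@typeInfo(P).Struct.backing_integer != null) unreachable; // non-packed
    }
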
@@ -14047,8 +14565,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
);
},
.BoundFn => @panic("TODO remove this type from the language and compiler"),
- .Frame => return sema.fail(block, src, "TODO: implement zirTypeInfo for Frame", .{}),
- .AnyFrame => return sema.fail(block, src, "TODO: implement zirTypeInfo for AnyFrame", .{}),
+ .Frame => return sema.failWithUseOfAsync(block, src),
+ .AnyFrame => return sema.failWithUseOfAsync(block, src),
}
}
@@ -14333,6 +14851,20 @@ fn zirBoolBr(
const rhs_result = try sema.resolveBody(rhs_block, body, inst);
_ = try rhs_block.addBr(block_inst, rhs_result);
+ return finishCondBr(sema, parent_block, &child_block, &then_block, &else_block, lhs, block_inst);
+}
+
+fn finishCondBr(
+ sema: *Sema,
+ parent_block: *Block,
+ child_block: *Block,
+ then_block: *Block,
+ else_block: *Block,
+ cond: Air.Inst.Ref,
+ block_inst: Air.Inst.Index,
+) !Air.Inst.Ref {
+ const gpa = sema.gpa;
+
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len +
then_block.instructions.items.len + else_block.instructions.items.len +
@typeInfo(Air.Block).Struct.fields.len + child_block.instructions.items.len + 1);
@@ -14345,7 +14877,7 @@ fn zirBoolBr(
sema.air_extra.appendSliceAssumeCapacity(else_block.instructions.items);
_ = try child_block.addInst(.{ .tag = .cond_br, .data = .{ .pl_op = .{
- .operand = lhs,
+ .operand = cond,
.payload = cond_br_payload,
} } });
@@ -14715,10 +15247,83 @@ fn zirRetLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir
const operand = try sema.analyzeLoad(block, src, ret_ptr, src);
return sema.analyzeRet(block, operand, src);
}
+
+ if (sema.wantErrorReturnTracing()) {
+ const is_non_err = try sema.analyzePtrIsNonErr(block, src, ret_ptr);
+ return retWithErrTracing(sema, block, src, is_non_err, .ret_load, ret_ptr);
+ }
+
_ = try block.addUnOp(.ret_load, ret_ptr);
return always_noreturn;
}
+fn retWithErrTracing(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ is_non_err: Air.Inst.Ref,
+ ret_tag: Air.Inst.Tag,
+ operand: Air.Inst.Ref,
+) CompileError!Zir.Inst.Index {
+ const need_check = switch (is_non_err) {
+ .bool_true => {
+ _ = try block.addUnOp(ret_tag, operand);
+ return always_noreturn;
+ },
+ .bool_false => false,
+ else => true,
+ };
+ const gpa = sema.gpa;
+ const unresolved_stack_trace_ty = try sema.getBuiltinType(block, src, "StackTrace");
+ const stack_trace_ty = try sema.resolveTypeFields(block, src, unresolved_stack_trace_ty);
+ const ptr_stack_trace_ty = try Type.Tag.single_mut_pointer.create(sema.arena, stack_trace_ty);
+ const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty);
+ const return_err_fn = try sema.getBuiltin(block, src, "returnError");
+ const args: [1]Air.Inst.Ref = .{err_return_trace};
+
+ if (!need_check) {
+ _ = try sema.analyzeCall(block, return_err_fn, src, src, .never_inline, false, &args, null);
+ _ = try block.addUnOp(ret_tag, operand);
+ return always_noreturn;
+ }
+
+ var then_block = block.makeSubBlock();
+ defer then_block.instructions.deinit(gpa);
+ _ = try then_block.addUnOp(ret_tag, operand);
+
+ var else_block = block.makeSubBlock();
+ defer else_block.instructions.deinit(gpa);
+ _ = try sema.analyzeCall(&else_block, return_err_fn, src, src, .never_inline, false, &args, null);
+ _ = try else_block.addUnOp(ret_tag, operand);
+
+ try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len +
+ then_block.instructions.items.len + else_block.instructions.items.len +
+ @typeInfo(Air.Block).Struct.fields.len + 1);
+
+ const cond_br_payload = sema.addExtraAssumeCapacity(Air.CondBr{
+ .then_body_len = @intCast(u32, then_block.instructions.items.len),
+ .else_body_len = @intCast(u32, else_block.instructions.items.len),
+ });
+ sema.air_extra.appendSliceAssumeCapacity(then_block.instructions.items);
+ sema.air_extra.appendSliceAssumeCapacity(else_block.instructions.items);
+
+ _ = try block.addInst(.{ .tag = .cond_br, .data = .{ .pl_op = .{
+ .operand = is_non_err,
+ .payload = cond_br_payload,
+ } } });
+
+ return always_noreturn;
+}
+
+fn wantErrorReturnTracing(sema: *Sema) bool {
+ // TODO implement this feature in all the backends and then delete this check.
+ const backend_supports_error_return_tracing = sema.mod.comp.bin_file.options.use_llvm;
+
+ return sema.fn_ret_ty.isError() and
+ sema.mod.comp.bin_file.options.error_return_tracing and
+ backend_supports_error_return_tracing;
+}
+
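
`retWithErrTracing` threads a `cond_br` on `is_non_err` so the `returnError` frame is appended only on the error path, and it skips the branch entirely when the operand is comptime-known non-error (`bool_true`). In source terms the emitted AIR corresponds to something like this pseudocode (hypothetical helpers):

    // if (isError(result)) returnError(errorReturnTrace()); // else branch
    // return result;                                        // both branches end in `ret`
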
fn addToInferredErrorSet(sema: *Sema, uncasted_operand: Air.Inst.Ref) !void {
assert(sema.fn_ret_ty.zigTypeTag() == .ErrorUnion);
@@ -14764,27 +15369,15 @@ fn analyzeRet(
return always_noreturn;
}
- // TODO implement this feature in all the backends and then delete this check.
- const backend_supports_error_return_tracing =
- sema.mod.comp.bin_file.options.use_llvm;
+ try sema.resolveTypeLayout(block, src, sema.fn_ret_ty);
- if (sema.fn_ret_ty.isError() and
- sema.mod.comp.bin_file.options.error_return_tracing and
- backend_supports_error_return_tracing)
- ret_err: {
- if (try sema.resolveMaybeUndefVal(block, src, operand)) |ret_val| {
- if (ret_val.tag() != .@"error") break :ret_err;
- }
- const return_err_fn = try sema.getBuiltin(block, src, "returnError");
- const unresolved_stack_trace_ty = try sema.getBuiltinType(block, src, "StackTrace");
- const stack_trace_ty = try sema.resolveTypeFields(block, src, unresolved_stack_trace_ty);
- const ptr_stack_trace_ty = try Type.Tag.optional_single_mut_pointer.create(sema.arena, stack_trace_ty);
- const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty);
- const args: [1]Air.Inst.Ref = .{err_return_trace};
- _ = try sema.analyzeCall(block, return_err_fn, src, src, .never_inline, false, &args, null);
+ if (sema.wantErrorReturnTracing()) {
+ // Avoid adding a frame to the error return trace in case the value is comptime-known
+ // to be not an error.
+ const is_non_err = try sema.analyzeIsNonErr(block, src, operand);
+ return retWithErrTracing(sema, block, src, is_non_err, .ret, operand);
}
- try sema.resolveTypeLayout(block, src, sema.fn_ret_ty);
_ = try block.addUnOp(.ret, operand);
return always_noreturn;
}
@@ -15015,7 +15608,9 @@ fn unionInit(
const init = try sema.coerce(block, field.ty, uncasted_init, init_src);
if (try sema.resolveMaybeUndefVal(block, init_src, init)) |init_val| {
- const tag_val = try Value.Tag.enum_field_index.create(sema.arena, field_index);
+ const tag_ty = union_ty.unionTagTypeHypothetical();
+ const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?);
+ const tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index);
return sema.addConstant(union_ty, try Value.Tag.@"union".create(sema.arena, .{
.tag = tag_val,
.val = init_val,
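
The replaced line fixes a latent bug: a union field index was stored where an enum field index belongs, and the two differ whenever an explicitly tagged union declares its fields in a different order than its tag enum. A sketch of a case that tells them apart (hypothetical types):

    const Tag = enum { b, a };                // .b == 0, .a == 1
    const U = union(Tag) { a: u32, b: void }; // field "a" has union index 0
    // @unionInit(U, "a", 7) must tag the value with .a (enum index 1),
    // not enum index 0 (.b), which the old field-index code would produce.
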
@@ -15113,7 +15708,9 @@ fn zirStructInit(
const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data;
const field_name = sema.code.nullTerminatedString(field_type_extra.name_start);
const field_index = try sema.unionFieldIndex(block, resolved_ty, field_name, field_src);
- const tag_val = try Value.Tag.enum_field_index.create(sema.arena, field_index);
+ const tag_ty = resolved_ty.unionTagTypeHypothetical();
+ const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?);
+ const tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index);
const init_inst = try sema.resolveInst(item.data.init);
if (try sema.resolveMaybeUndefVal(block, field_src, init_inst)) |val| {
@@ -15161,6 +15758,8 @@ fn finishStructInit(
const gpa = sema.gpa;
var root_msg: ?*Module.ErrorMsg = null;
+ errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
+
if (struct_ty.isAnonStruct()) {
const struct_obj = struct_ty.castTag(.anon_struct).?.data;
for (struct_obj.values) |default_val, i| {
@@ -15216,6 +15815,7 @@ fn finishStructInit(
}
if (root_msg) |msg| {
+ root_msg = null;
if (struct_ty.castTag(.@"struct")) |struct_obj| {
const fqn = try struct_obj.data.getFullyQualifiedName(sema.mod);
defer gpa.free(fqn);
@@ -15245,6 +15845,7 @@ fn finishStructInit(
}
if (is_ref) {
+ try sema.resolveStructLayout(block, dest_src, struct_ty);
const target = sema.mod.getTarget();
const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = struct_ty,
@@ -15619,9 +16220,10 @@ fn fieldType(
field_src: LazySrcLoc,
ty_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
- const resolved_ty = try sema.resolveTypeFields(block, ty_src, aggregate_ty);
- var cur_ty = resolved_ty;
+ var cur_ty = aggregate_ty;
while (true) {
+ const resolved_ty = try sema.resolveTypeFields(block, ty_src, cur_ty);
+ cur_ty = resolved_ty;
switch (cur_ty.zigTypeTag()) {
.Struct => {
if (cur_ty.isAnonStruct()) {
@@ -15693,7 +16295,7 @@ fn zirFrame(
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand));
- return sema.fail(block, src, "TODO: Sema.zirFrame", .{});
+ return sema.failWithUseOfAsync(block, src);
}
fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -15742,7 +16344,7 @@ fn zirUnaryMath(
block: *Block,
inst: Zir.Inst.Index,
air_tag: Air.Inst.Tag,
- eval: fn (Value, Type, Allocator, std.Target) Allocator.Error!Value,
+ comptime eval: fn (Value, Type, Allocator, std.Target) Allocator.Error!Value,
) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -15853,25 +16455,30 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const field_name = enum_ty.enumFieldName(field_index);
return sema.addStrLit(block, field_name);
}
+ try sema.requireRuntimeBlock(block, src, operand_src);
+ if (block.wantSafety() and sema.mod.comp.bin_file.options.use_llvm) {
+ const ok = try block.addUnOp(.is_named_enum_value, casted_operand);
+ try sema.addSafetyCheck(block, ok, .invalid_enum_value);
+ }
// In case the value is runtime-known, we have an AIR instruction for this instead
// of trying to lower it in Sema because an optimization pass may result in the operand
// being comptime-known, which would let us elide the `tag_name` AIR instruction.
return block.addUnOp(.tag_name, casted_operand);
}
-fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
- const inst_data = sema.code.instructions.items(.data)[inst].un_node;
- const src = inst_data.src();
+ const name_strategy = @intToEnum(Zir.Inst.NameStrategy, extended.small);
+ const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
+ const src = LazySrcLoc.nodeOffset(extra.node);
const type_info_ty = try sema.resolveBuiltinTypeFields(block, src, "Type");
- const uncasted_operand = try sema.resolveInst(inst_data.operand);
- const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+ const uncasted_operand = try sema.resolveInst(extra.operand);
+ const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const type_info = try sema.coerce(block, type_info_ty, uncasted_operand, operand_src);
const val = try sema.resolveConstValue(block, operand_src, type_info, "operand to @Type must be comptime known");
const union_val = val.cast(Value.Payload.Union).?.data;
- const tag_ty = type_info_ty.unionTagType().?;
const target = mod.getTarget();
- const tag_index = tag_ty.enumTagFieldIndex(union_val.tag, mod).?;
+ const tag_index = type_info_ty.unionTagFieldIndex(union_val.tag, mod).?;
if (union_val.val.anyUndef()) return sema.failWithUseOfUndef(block, src);
switch (@intToEnum(std.builtin.TypeId, tag_index)) {
.Type => return Air.Inst.Ref.type_type,
@@ -15882,7 +16489,7 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
.ComptimeInt => return Air.Inst.Ref.comptime_int_type,
.Undefined => return Air.Inst.Ref.undefined_type,
.Null => return Air.Inst.Ref.null_type,
- .AnyFrame => return Air.Inst.Ref.anyframe_type,
+ .AnyFrame => return sema.failWithUseOfAsync(block, src),
.EnumLiteral => return Air.Inst.Ref.enum_literal_type,
.Int => {
const struct_val = union_val.val.castTag(.aggregate).?.data;
@@ -15945,7 +16552,7 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
if (!try sema.intFitsInType(block, src, alignment_val, Type.u32, null)) {
return sema.fail(block, src, "alignment must fit in 'u32'", .{});
}
- const abi_align = @intCast(u29, alignment_val.toUnsignedInt(target));
+ const abi_align = @intCast(u29, (try alignment_val.getUnsignedIntAdvanced(target, sema.kit(block, src))).?);
var buffer: Value.ToTypeBuffer = undefined;
const unresolved_elem_ty = child_val.toType(&buffer);
@@ -16110,22 +16717,31 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
const struct_val = union_val.val.castTag(.aggregate).?.data;
// layout: containerlayout,
const layout_val = struct_val[0];
+ // backing_int: ?type,
+ const backing_int_val = struct_val[1];
            // fields: []const structfield,
- const fields_val = struct_val[1];
+ const fields_val = struct_val[2];
// decls: []const declaration,
- const decls_val = struct_val[2];
+ const decls_val = struct_val[3];
// is_tuple: bool,
- const is_tuple_val = struct_val[3];
+ const is_tuple_val = struct_val[4];
+ assert(struct_val.len == 5);
+
+ const layout = layout_val.toEnum(std.builtin.Type.ContainerLayout);
// Decls
if (decls_val.sliceLen(mod) > 0) {
return sema.fail(block, src, "reified structs must have no decls", .{});
}
+ if (layout != .Packed and !backing_int_val.isNull()) {
+ return sema.fail(block, src, "non-packed struct does not support backing integer type", .{});
+ }
+
return if (is_tuple_val.toBool())
try sema.reifyTuple(block, src, fields_val)
else
- try sema.reifyStruct(block, inst, src, layout_val, fields_val);
+ try sema.reifyStruct(block, inst, src, layout, backing_int_val, fields_val, name_strategy);
},
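A hedged sketch of what the new backing-integer plumbing exposes from userland, assuming the 0.10-era reflection field names (`backing_integer`, `field_type`):

```zig
const std = @import("std");

const Flags = @Type(.{ .Struct = .{
    .layout = .Packed,
    .backing_integer = u8,
    .fields = &[_]std.builtin.Type.StructField{
        .{ .name = "lo", .field_type = u4, .default_value = null, .is_comptime = false, .alignment = 0 },
        .{ .name = "hi", .field_type = u4, .default_value = null, .is_comptime = false, .alignment = 0 },
    },
    .decls = &.{},
    .is_tuple = false,
} });

comptime {
    std.debug.assert(@bitSizeOf(Flags) == 8);
}
```

Supplying a backing integer together with `.layout = .Auto` trips the new "non-packed struct does not support backing integer type" error instead.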
.Enum => {
const struct_val = union_val.val.castTag(.aggregate).?.data;
@@ -16171,10 +16787,10 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
};
const enum_ty = Type.initPayload(&enum_ty_payload.base);
const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty);
- const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, .{
+ const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = enum_val,
- }, .anon, "enum", null);
+ }, name_strategy, "enum", inst);
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
@@ -16185,7 +16801,6 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
.tag_ty_inferred = false,
.fields = .{},
.values = .{},
- .node_offset = src.node_offset.x,
.namespace = .{
.parent = block.namespace,
.ty = enum_ty,
@@ -16204,43 +16819,39 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
// Fields
const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod));
- if (fields_len > 0) {
- try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
- try enum_obj.values.ensureTotalCapacityContext(new_decl_arena_allocator, fields_len, .{
+ try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
+ try enum_obj.values.ensureTotalCapacityContext(new_decl_arena_allocator, fields_len, .{
+ .ty = enum_obj.tag_ty,
+ .mod = mod,
+ });
+
+ var i: usize = 0;
+ while (i < fields_len) : (i += 1) {
+ const elem_val = try fields_val.elemValue(sema.mod, sema.arena, i);
+ const field_struct_val = elem_val.castTag(.aggregate).?.data;
+ // TODO use reflection instead of magic numbers here
+ // name: []const u8
+ const name_val = field_struct_val[0];
+ // value: comptime_int
+ const value_val = field_struct_val[1];
+
+ const field_name = try name_val.toAllocatedBytes(
+ Type.initTag(.const_slice_u8),
+ new_decl_arena_allocator,
+ sema.mod,
+ );
+
+ const gop = enum_obj.fields.getOrPutAssumeCapacity(field_name);
+ if (gop.found_existing) {
+ // TODO: better source location
+ return sema.fail(block, src, "duplicate enum tag {s}", .{field_name});
+ }
+
+ const copied_tag_val = try value_val.copy(new_decl_arena_allocator);
+ enum_obj.values.putAssumeCapacityNoClobberContext(copied_tag_val, {}, .{
.ty = enum_obj.tag_ty,
.mod = mod,
});
-
- var i: usize = 0;
- while (i < fields_len) : (i += 1) {
- const elem_val = try fields_val.elemValue(sema.mod, sema.arena, i);
- const field_struct_val = elem_val.castTag(.aggregate).?.data;
- // TODO use reflection instead of magic numbers here
- // name: []const u8
- const name_val = field_struct_val[0];
- // value: comptime_int
- const value_val = field_struct_val[1];
-
- const field_name = try name_val.toAllocatedBytes(
- Type.initTag(.const_slice_u8),
- new_decl_arena_allocator,
- sema.mod,
- );
-
- const gop = enum_obj.fields.getOrPutAssumeCapacity(field_name);
- if (gop.found_existing) {
- // TODO: better source location
- return sema.fail(block, src, "duplicate enum tag {s}", .{field_name});
- }
-
- const copied_tag_val = try value_val.copy(new_decl_arena_allocator);
- enum_obj.values.putAssumeCapacityNoClobberContext(copied_tag_val, {}, .{
- .ty = enum_obj.tag_ty,
- .mod = mod,
- });
- }
- } else {
- return sema.fail(block, src, "enums must have at least one field", .{});
}
try new_decl.finalizeNewArena(&new_decl_arena);
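Dropping the `fields_len > 0` special case means a zero-field enum can now be reified; a minimal sketch, assuming the 0.10-era `std.builtin.Type.Enum` shape:

```zig
const Empty = @Type(.{ .Enum = .{
    .layout = .Auto,
    .tag_type = u0,
    .fields = &.{},
    .decls = &.{},
    .is_exhaustive = true,
} });
```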
@@ -16268,17 +16879,16 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
};
const opaque_ty = Type.initPayload(&opaque_ty_payload.base);
const opaque_val = try Value.Tag.ty.create(new_decl_arena_allocator, opaque_ty);
- const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, .{
+ const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = opaque_val,
- }, .anon, "opaque", null);
+ }, name_strategy, "opaque", inst);
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
opaque_obj.* = .{
.owner_decl = new_decl_index,
- .node_offset = src.node_offset.x,
.namespace = .{
.parent = block.namespace,
.ty = opaque_ty,
@@ -16327,10 +16937,10 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
};
const union_ty = Type.initPayload(&union_payload.base);
const new_union_val = try Value.Tag.ty.create(new_decl_arena_allocator, union_ty);
- const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, .{
+ const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = new_union_val,
- }, .anon, "union", null);
+ }, name_strategy, "union", inst);
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
@@ -16338,7 +16948,6 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
.owner_decl = new_decl_index,
.tag_ty = Type.initTag(.@"null"),
.fields = .{},
- .node_offset = src.node_offset.x,
.zir_index = inst,
.layout = layout,
.status = .have_field_types,
@@ -16367,58 +16976,54 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
}
// Fields
- if (fields_len > 0) {
- try union_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
+ try union_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
- var i: usize = 0;
- while (i < fields_len) : (i += 1) {
- const elem_val = try fields_val.elemValue(sema.mod, sema.arena, i);
- const field_struct_val = elem_val.castTag(.aggregate).?.data;
- // TODO use reflection instead of magic numbers here
- // name: []const u8
- const name_val = field_struct_val[0];
- // field_type: type,
- const field_type_val = field_struct_val[1];
- // alignment: comptime_int,
- const alignment_val = field_struct_val[2];
+ var i: usize = 0;
+ while (i < fields_len) : (i += 1) {
+ const elem_val = try fields_val.elemValue(sema.mod, sema.arena, i);
+ const field_struct_val = elem_val.castTag(.aggregate).?.data;
+ // TODO use reflection instead of magic numbers here
+ // name: []const u8
+ const name_val = field_struct_val[0];
+ // field_type: type,
+ const field_type_val = field_struct_val[1];
+ // alignment: comptime_int,
+ const alignment_val = field_struct_val[2];
- const field_name = try name_val.toAllocatedBytes(
- Type.initTag(.const_slice_u8),
- new_decl_arena_allocator,
- sema.mod,
- );
+ const field_name = try name_val.toAllocatedBytes(
+ Type.initTag(.const_slice_u8),
+ new_decl_arena_allocator,
+ sema.mod,
+ );
- if (enum_field_names) |set| {
- set.putAssumeCapacity(field_name, {});
- }
-
- if (tag_ty_field_names) |*names| {
- const enum_has_field = names.orderedRemove(field_name);
- if (!enum_has_field) {
- const msg = msg: {
- const msg = try sema.errMsg(block, src, "no field named '{s}' in enum '{}'", .{ field_name, union_obj.tag_ty.fmt(sema.mod) });
- errdefer msg.destroy(sema.gpa);
- try sema.addDeclaredHereNote(msg, union_obj.tag_ty);
- break :msg msg;
- };
- return sema.failWithOwnedErrorMsg(msg);
- }
- }
-
- const gop = union_obj.fields.getOrPutAssumeCapacity(field_name);
- if (gop.found_existing) {
- // TODO: better source location
- return sema.fail(block, src, "duplicate union field {s}", .{field_name});
- }
-
- var buffer: Value.ToTypeBuffer = undefined;
- gop.value_ptr.* = .{
- .ty = try field_type_val.toType(&buffer).copy(new_decl_arena_allocator),
- .abi_align = @intCast(u32, alignment_val.toUnsignedInt(target)),
- };
+ if (enum_field_names) |set| {
+ set.putAssumeCapacity(field_name, {});
}
- } else {
- return sema.fail(block, src, "unions must have at least one field", .{});
+
+ if (tag_ty_field_names) |*names| {
+ const enum_has_field = names.orderedRemove(field_name);
+ if (!enum_has_field) {
+ const msg = msg: {
+ const msg = try sema.errMsg(block, src, "no field named '{s}' in enum '{}'", .{ field_name, union_obj.tag_ty.fmt(sema.mod) });
+ errdefer msg.destroy(sema.gpa);
+ try sema.addDeclaredHereNote(msg, union_obj.tag_ty);
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
+ }
+ }
+
+ const gop = union_obj.fields.getOrPutAssumeCapacity(field_name);
+ if (gop.found_existing) {
+ // TODO: better source location
+ return sema.fail(block, src, "duplicate union field {s}", .{field_name});
+ }
+
+ var buffer: Value.ToTypeBuffer = undefined;
+ gop.value_ptr.* = .{
+ .ty = try field_type_val.toType(&buffer).copy(new_decl_arena_allocator),
+ .abi_align = @intCast(u32, alignment_val.toUnsignedInt(target)),
+ };
}
if (tag_ty_field_names) |names| {
@@ -16534,7 +17139,7 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
return sema.addType(ty);
},
.BoundFn => @panic("TODO delete BoundFn from the language"),
- .Frame => @panic("TODO implement https://github.com/ziglang/zig/issues/10710"),
+ .Frame => return sema.failWithUseOfAsync(block, src),
}
}
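The union arm above gets the same treatment as the enum arm: the `fields_len > 0` gate and its "unions must have at least one field" error are gone, so a zero-field union is representable, while `.Frame` and `.AnyFrame` now report the standard use-of-async error rather than a TODO panic. A sketch under the 0.10-era `std.builtin.Type.Union` shape:

```zig
const Never = @Type(.{ .Union = .{
    .layout = .Auto,
    .tag_type = null,
    .fields = &.{},
    .decls = &.{},
} });
```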
@@ -16621,8 +17226,10 @@ fn reifyStruct(
block: *Block,
inst: Zir.Inst.Index,
src: LazySrcLoc,
- layout_val: Value,
+ layout: std.builtin.Type.ContainerLayout,
+ backing_int_val: Value,
fields_val: Value,
+ name_strategy: Zir.Inst.NameStrategy,
) CompileError!Air.Inst.Ref {
var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
errdefer new_decl_arena.deinit();
@@ -16632,19 +17239,18 @@ fn reifyStruct(
const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj);
const new_struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty);
const mod = sema.mod;
- const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, .{
+ const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
.ty = Type.type,
.val = new_struct_val,
- }, .anon, "struct", null);
+ }, name_strategy, "struct", inst);
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
struct_obj.* = .{
.owner_decl = new_decl_index,
.fields = .{},
- .node_offset = src.node_offset.x,
.zir_index = inst,
- .layout = layout_val.toEnum(std.builtin.Type.ContainerLayout),
+ .layout = layout,
.status = .have_field_types,
.known_non_opv = false,
.namespace = .{
@@ -16710,6 +17316,41 @@ fn reifyStruct(
};
}
+ if (layout == .Packed) {
+ struct_obj.status = .layout_wip;
+
+ for (struct_obj.fields.values()) |field, index| {
+ sema.resolveTypeLayout(block, src, field.ty) catch |err| switch (err) {
+ error.AnalysisFail => {
+ const msg = sema.err orelse return err;
+ try sema.addFieldErrNote(block, struct_ty, index, msg, "while checking this field", .{});
+ return err;
+ },
+ else => return err,
+ };
+ }
+
+ var fields_bit_sum: u64 = 0;
+ for (struct_obj.fields.values()) |field| {
+ fields_bit_sum += field.ty.bitSize(target);
+ }
+
+ if (backing_int_val.optionalValue()) |payload| {
+ var buf: Value.ToTypeBuffer = undefined;
+ const backing_int_ty = payload.toType(&buf);
+ try sema.checkBackingIntType(block, src, backing_int_ty, fields_bit_sum);
+ struct_obj.backing_int_ty = try backing_int_ty.copy(new_decl_arena_allocator);
+ } else {
+ var buf: Type.Payload.Bits = .{
+ .base = .{ .tag = .int_unsigned },
+ .data = @intCast(u16, fields_bit_sum),
+ };
+ struct_obj.backing_int_ty = try Type.initPayload(&buf.base).copy(new_decl_arena_allocator);
+ }
+
+ struct_obj.status = .have_layout;
+ }
+
try new_decl.finalizeNewArena(&new_decl_arena);
return sema.analyzeDeclVal(block, src, new_decl_index);
}
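When `backing_int_val` is null, the fallback above synthesizes an unsigned integer exactly as wide as the summed field bits; source-level packed structs (see `semaBackingIntType` later in this diff) get the same inference. A sketch of the observable result:

```zig
const std = @import("std");

const Pair = packed struct { a: u4, b: u5 };

comptime {
    // 4 + 5 field bits, so the inferred backing type is u9.
    std.debug.assert(@bitSizeOf(Pair) == 9);
}
```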
@@ -16736,13 +17377,13 @@ fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
fn zirFrameType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
- return sema.fail(block, src, "TODO: Sema.zirFrameType", .{});
+ return sema.failWithUseOfAsync(block, src);
}
fn zirFrameSize(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
- return sema.fail(block, src, "TODO: Sema.zirFrameSize", .{});
+ return sema.failWithUseOfAsync(block, src);
}
fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -16834,7 +17475,7 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
try sema.requireRuntimeBlock(block, src, operand_src);
- if (block.wantSafety()) {
+ if (block.wantSafety() and try sema.typeHasRuntimeBits(block, sema.src, type_res.elemType2())) {
if (!type_res.isAllowzeroPtr()) {
const is_non_zero = try block.addBinOp(.cmp_neq, operand_coerced, .zero_usize);
try sema.addSafetyCheck(block, is_non_zero, .cast_to_null);
@@ -16931,17 +17572,10 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
}
try sema.requireRuntimeBlock(block, src, operand_src);
- if (block.wantSafety() and !dest_ty.isAnyError()) {
- const err_int_inst = try block.addBitCast(Type.u16, operand);
- // TODO: Output a switch instead of chained OR's.
- var found_match: Air.Inst.Ref = undefined;
- for (dest_ty.errorSetNames()) |dest_err_name, i| {
- const dest_err_int = (try sema.mod.getErrorValue(dest_err_name)).value;
- const dest_err_int_inst = try sema.addIntUnsigned(Type.u16, dest_err_int);
- const next_match = try block.addBinOp(.cmp_eq, dest_err_int_inst, err_int_inst);
- found_match = if (i == 0) next_match else try block.addBinOp(.bool_or, found_match, next_match);
- }
- try sema.addSafetyCheck(block, found_match, .invalid_error_code);
+ if (block.wantSafety() and !dest_ty.isAnyError() and sema.mod.comp.bin_file.options.use_llvm) {
+ const err_int_inst = try block.addBitCast(Type.err_int, operand);
+ const ok = try block.addTyOp(.error_set_has_value, dest_ty, err_int_inst);
+ try sema.addSafetyCheck(block, ok, .invalid_error_code);
}
return block.addBitCast(dest_ty, operand);
}
@@ -16969,6 +17603,15 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
else
operand;
+ if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |operand_val| {
+ if (!dest_ty.ptrAllowsZero() and operand_val.isUndef()) {
+ return sema.failWithUseOfUndef(block, operand_src);
+ }
+ if (!dest_ty.ptrAllowsZero() and operand_val.isNull()) {
+            return sema.fail(block, operand_src, "null pointer cast to type '{}'", .{dest_ty.fmt(sema.mod)});
+ }
+ }
+
const dest_elem_ty = dest_ty.elemType2();
try sema.resolveTypeLayout(block, dest_ty_src, dest_elem_ty);
const dest_align = dest_ty.ptrAlignment(target);
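A sketch of the new comptime rejection (C pointers are the usual way a comptime-known null reaches `@ptrCast`):

```zig
comptime {
    const p: [*c]u8 = null;
    // error: null pointer cast to type '*u8'
    // (an undefined operand is likewise rejected via failWithUseOfUndef)
    _ = @ptrCast(*u8, p);
}
```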
@@ -17126,7 +17769,9 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
}
try sema.requireRuntimeBlock(block, inst_data.src(), ptr_src);
- if (block.wantSafety() and dest_align > 1) {
+ if (block.wantSafety() and dest_align > 1 and
+ try sema.typeHasRuntimeBits(block, sema.src, dest_ty.elemType2()))
+ {
const val_payload = try sema.arena.create(Value.Payload.U64);
val_payload.* = .{
.base = .{ .tag = .int_u64 },
@@ -17145,7 +17790,7 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
const ok = if (ptr_ty.isSlice()) ok: {
const len = try sema.analyzeSliceLen(block, ptr_src, ptr);
- const len_zero = try block.addBinOp(.cmp_eq, len, try sema.addConstant(Type.usize, Value.zero));
+ const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize);
break :ok try block.addBinOp(.bit_or, len_zero, is_aligned);
} else is_aligned;
try sema.addSafetyCheck(block, ok, .incorrect_alignment);
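The `@alignCast` check itself is unchanged in spirit; it is now skipped when the destination element type has no runtime bits, and the slice-length comparison reuses the interned `.zero_usize`. For reference, the pattern being guarded:

```zig
var buf: [8]u8 align(4) = undefined;

pub fn main() void {
    const unaligned = @as([*]u8, &buf) + 1;
    // Runtime safety: panics with "incorrect alignment" since the
    // address is offset by one byte from 4-byte alignment.
    _ = @alignCast(4, unaligned);
}
```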
@@ -17158,11 +17803,11 @@ fn zirBitCount(
block: *Block,
inst: Zir.Inst.Index,
air_tag: Air.Inst.Tag,
- comptimeOp: fn (val: Value, ty: Type, target: std.Target) u64,
+ comptime comptimeOp: fn (val: Value, ty: Type, target: std.Target) u64,
) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
- const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
+ const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
_ = try checkIntOrVector(sema, block, operand, operand_src);
@@ -17214,17 +17859,16 @@ fn zirBitCount(
fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
- const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
- const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
+ const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
- const scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src);
+ const scalar_ty = try sema.checkIntOrVector(block, operand, operand_src);
const target = sema.mod.getTarget();
const bits = scalar_ty.intInfo(target).bits;
if (bits % 8 != 0) {
return sema.fail(
block,
- ty_src,
+ operand_src,
"@byteSwap requires the number of bits to be evenly divisible by 8, but {} has {} bits",
.{ scalar_ty.fmt(sema.mod), bits },
);
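Two things change here besides the source location moving to arg0: `@byteSwap` is now a single-argument builtin (the operand type is inferred), and `.ComptimeInt` is no longer accepted since a comptime_int has no fixed width to swap. A sketch:

```zig
const std = @import("std");

test "byteSwap" {
    const x: u24 = 0x123456;
    try std.testing.expectEqual(@as(u24, 0x563412), @byteSwap(x));
    // @byteSwap on a u12 would fail to compile: 12 bits is not
    // evenly divisible by 8.
}
```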
@@ -17235,7 +17879,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
switch (operand_ty.zigTypeTag()) {
- .Int, .ComptimeInt => {
+ .Int => {
const runtime_src = if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| {
if (val.isUndef()) return sema.addConstUndef(operand_ty);
const result_val = try val.byteSwap(operand_ty, target, sema.arena);
@@ -17273,7 +17917,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
- const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
+ const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
_ = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src);
@@ -18973,13 +19617,13 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
fn zirBuiltinAsyncCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
- return sema.fail(block, src, "TODO: Sema.zirBuiltinAsyncCall", .{});
+ return sema.failWithUseOfAsync(block, src);
}
fn zirResume(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
- return sema.fail(block, src, "TODO: Sema.zirResume", .{});
+ return sema.failWithUseOfAsync(block, src);
}
fn zirAwait(
@@ -18990,7 +19634,7 @@ fn zirAwait(
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
- return sema.fail(block, src, "TODO: Sema.zirAwait", .{});
+ return sema.failWithUseOfAsync(block, src);
}
fn zirAwaitNosuspend(
@@ -19001,7 +19645,7 @@ fn zirAwaitNosuspend(
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
- return sema.fail(block, src, "TODO: Sema.zirAwaitNosuspend", .{});
+ return sema.failWithUseOfAsync(block, src);
}
fn zirVarExtended(
@@ -19670,6 +20314,8 @@ fn validateRunTimeType(
};
}
+const TypeSet = std.HashMapUnmanaged(Type, void, Type.HashContext64, std.hash_map.default_max_load_percentage);
+
fn explainWhyTypeIsComptime(
sema: *Sema,
block: *Block,
@@ -19677,6 +20323,22 @@ fn explainWhyTypeIsComptime(
msg: *Module.ErrorMsg,
src_loc: Module.SrcLoc,
ty: Type,
+) CompileError!void {
+ var type_set = TypeSet{};
+ defer type_set.deinit(sema.gpa);
+
+ try sema.resolveTypeFully(block, src, ty);
+ return sema.explainWhyTypeIsComptimeInner(block, src, msg, src_loc, ty, &type_set);
+}
+
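The memoizing `TypeSet` wrapper exists because self-referential comptime-only types could previously recurse without bound while the error notes were being built; a sketch of such a type:

```zig
// Comptime-only because of the `type` field; self-referential via `next`,
// so explaining the field types revisits Node itself.
const Node = struct {
    ty: type,
    next: ?*const Node,
};
```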
+fn explainWhyTypeIsComptimeInner(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ msg: *Module.ErrorMsg,
+ src_loc: Module.SrcLoc,
+ ty: Type,
+ type_set: *TypeSet,
) CompileError!void {
const mod = sema.mod;
switch (ty.zigTypeTag()) {
@@ -19714,7 +20376,7 @@ fn explainWhyTypeIsComptime(
},
.Array, .Vector => {
- try sema.explainWhyTypeIsComptime(block, src, msg, src_loc, ty.elemType());
+ try sema.explainWhyTypeIsComptimeInner(block, src, msg, src_loc, ty.elemType(), type_set);
},
.Pointer => {
const elem_ty = ty.elemType2();
@@ -19732,18 +20394,20 @@ fn explainWhyTypeIsComptime(
}
return;
}
- try sema.explainWhyTypeIsComptime(block, src, msg, src_loc, ty.elemType());
+ try sema.explainWhyTypeIsComptimeInner(block, src, msg, src_loc, ty.elemType(), type_set);
},
.Optional => {
var buf: Type.Payload.ElemType = undefined;
- try sema.explainWhyTypeIsComptime(block, src, msg, src_loc, ty.optionalChild(&buf));
+ try sema.explainWhyTypeIsComptimeInner(block, src, msg, src_loc, ty.optionalChild(&buf), type_set);
},
.ErrorUnion => {
- try sema.explainWhyTypeIsComptime(block, src, msg, src_loc, ty.errorUnionPayload());
+ try sema.explainWhyTypeIsComptimeInner(block, src, msg, src_loc, ty.errorUnionPayload(), type_set);
},
.Struct => {
+ if ((try type_set.getOrPutContext(sema.gpa, ty, .{ .mod = mod })).found_existing) return;
+
if (ty.castTag(.@"struct")) |payload| {
const struct_obj = payload.data;
for (struct_obj.fields.values()) |field, i| {
@@ -19751,9 +20415,10 @@ fn explainWhyTypeIsComptime(
.index = i,
.range = .type,
});
+
if (try sema.typeRequiresComptime(block, src, field.ty)) {
try mod.errNoteNonLazy(field_src_loc, msg, "struct requires comptime because of this field", .{});
- try sema.explainWhyTypeIsComptime(block, src, msg, field_src_loc, field.ty);
+ try sema.explainWhyTypeIsComptimeInner(block, src, msg, field_src_loc, field.ty, type_set);
}
}
}
@@ -19761,6 +20426,8 @@ fn explainWhyTypeIsComptime(
},
.Union => {
+ if ((try type_set.getOrPutContext(sema.gpa, ty, .{ .mod = mod })).found_existing) return;
+
if (ty.cast(Type.Payload.Union)) |payload| {
const union_obj = payload.data;
for (union_obj.fields.values()) |field, i| {
@@ -19768,9 +20435,10 @@ fn explainWhyTypeIsComptime(
.index = i,
.range = .type,
});
+
if (try sema.typeRequiresComptime(block, src, field.ty)) {
try mod.errNoteNonLazy(field_src_loc, msg, "union requires comptime because of this field", .{});
- try sema.explainWhyTypeIsComptime(block, src, msg, field_src_loc, field.ty);
+ try sema.explainWhyTypeIsComptimeInner(block, src, msg, field_src_loc, field.ty, type_set);
}
}
}
@@ -19911,8 +20579,8 @@ fn validatePackedType(ty: Type) bool {
.AnyFrame,
.Fn,
.Array,
- .Optional,
=> return false,
+ .Optional => return ty.isPtrLikeOptional(),
.Void,
.Bool,
.Float,
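`validatePackedType` now admits pointer-like optionals, which need no extra flag bit because null is represented as address zero; a sketch:

```zig
const List = packed struct {
    next: ?*const List, // allowed: pointer-like optional
    len: u32,
};
```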
@@ -19978,11 +20646,13 @@ pub const PanicId = enum {
shl_overflow,
shr_overflow,
divide_by_zero,
- remainder_division_zero_negative,
exact_division_remainder,
/// TODO make this call `std.builtin.panicInactiveUnionField`.
inactive_union_field,
integer_part_out_of_bounds,
+ corrupt_switch,
+ shift_rhs_too_big,
+ invalid_enum_value,
};
fn addSafetyCheck(
@@ -20076,7 +20746,7 @@ fn panicWithMsg(
const arena = sema.arena;
const this_feature_is_implemented_in_the_backend =
- mod.comp.bin_file.options.object_format == .c or
+ mod.comp.bin_file.options.target.ofmt == .c or
mod.comp.bin_file.options.use_llvm;
if (!this_feature_is_implemented_in_the_backend) {
// TODO implement this feature in all the backends and then delete this branch
@@ -20274,10 +20944,12 @@ fn safetyPanic(
.shl_overflow => "left shift overflowed bits",
.shr_overflow => "right shift overflowed bits",
.divide_by_zero => "division by zero",
- .remainder_division_zero_negative => "remainder division by zero or negative value",
.exact_division_remainder => "exact division produced remainder",
.inactive_union_field => "access of inactive union field",
.integer_part_out_of_bounds => "integer part of floating point value out of bounds",
+ .corrupt_switch => "switch on corrupt value",
+ .shift_rhs_too_big => "shift amount is greater than the type size",
+ .invalid_enum_value => "invalid enum value",
};
const msg_inst = msg_inst: {
@@ -20736,14 +21408,30 @@ fn fieldCallBind(
switch (concrete_ty.zigTypeTag()) {
.Struct => {
const struct_ty = try sema.resolveTypeFields(block, src, concrete_ty);
- const struct_obj = struct_ty.castTag(.@"struct").?.data;
+ if (struct_ty.castTag(.@"struct")) |struct_obj| {
+ const field_index_usize = struct_obj.data.fields.getIndex(field_name) orelse
+ break :find_field;
+ const field_index = @intCast(u32, field_index_usize);
+ const field = struct_obj.data.fields.values()[field_index];
- const field_index_usize = struct_obj.fields.getIndex(field_name) orelse
- break :find_field;
- const field_index = @intCast(u32, field_index_usize);
- const field = struct_obj.fields.values()[field_index];
-
- return finishFieldCallBind(sema, block, src, ptr_ty, field.ty, field_index, object_ptr);
+ return finishFieldCallBind(sema, block, src, ptr_ty, field.ty, field_index, object_ptr);
+ } else if (struct_ty.isTuple()) {
+ if (mem.eql(u8, field_name, "len")) {
+ return sema.addIntUnsigned(Type.usize, struct_ty.structFieldCount());
+ }
+ if (std.fmt.parseUnsigned(u32, field_name, 10)) |field_index| {
+ if (field_index >= struct_ty.structFieldCount()) break :find_field;
+ return finishFieldCallBind(sema, block, src, ptr_ty, struct_ty.structFieldType(field_index), field_index, object_ptr);
+ } else |_| {}
+ } else {
+ const max = struct_ty.structFieldCount();
+ var i: u32 = 0;
+ while (i < max) : (i += 1) {
+ if (mem.eql(u8, struct_ty.structFieldName(i), field_name)) {
+ return finishFieldCallBind(sema, block, src, ptr_ty, struct_ty.structFieldType(i), i, object_ptr);
+ }
+ }
+ }
},
.Union => {
const union_ty = try sema.resolveTypeFields(block, src, concrete_ty);
@@ -21009,7 +21697,7 @@ fn structFieldPtrByIndex(
const elem_size_bits = ptr_ty_data.pointee_type.bitSize(target);
if (elem_size_bytes * 8 == elem_size_bits) {
const byte_offset = ptr_ty_data.bit_offset / 8;
- const new_align = @as(u32, 1) << @intCast(u5, @ctz(u64, byte_offset | parent_align));
+ const new_align = @as(u32, 1) << @intCast(u5, @ctz(byte_offset | parent_align));
ptr_ty_data.bit_offset = 0;
ptr_ty_data.host_size = 0;
ptr_ty_data.@"align" = new_align;
@@ -21184,6 +21872,18 @@ fn unionFieldPtr(
.@"addrspace" = union_ptr_ty.ptrAddressSpace(),
});
+ if (initializing and field.ty.zigTypeTag() == .NoReturn) {
+ const msg = msg: {
+ const msg = try sema.errMsg(block, src, "cannot initialize 'noreturn' field of union", .{});
+ errdefer msg.destroy(sema.gpa);
+
+ try sema.addFieldErrNote(block, union_ty, field_index, msg, "field '{s}' declared here", .{field_name});
+ try sema.addDeclaredHereNote(msg, union_ty);
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
+ }
+
if (try sema.resolveDefinedValue(block, src, union_ptr)) |union_ptr_val| ct: {
switch (union_obj.layout) {
.Auto => if (!initializing) {
@@ -21192,17 +21892,18 @@ fn unionFieldPtr(
if (union_val.isUndef()) {
return sema.failWithUseOfUndef(block, src);
}
+ const enum_field_index = union_obj.tag_ty.enumFieldIndex(field_name).?;
const tag_and_val = union_val.castTag(.@"union").?.data;
var field_tag_buf: Value.Payload.U32 = .{
.base = .{ .tag = .enum_field_index },
- .data = field_index,
+ .data = @intCast(u32, enum_field_index),
};
const field_tag = Value.initPayload(&field_tag_buf.base);
const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, sema.mod);
if (!tag_matches) {
const msg = msg: {
const active_index = tag_and_val.tag.castTag(.enum_field_index).?.data;
- const active_field_name = union_obj.fields.keys()[active_index];
+ const active_field_name = union_obj.tag_ty.enumFieldName(active_index);
const msg = try sema.errMsg(block, src, "access of union field '{s}' while field '{s}' is active", .{ field_name, active_field_name });
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, union_ty);
@@ -21227,15 +21928,18 @@ fn unionFieldPtr(
if (!initializing and union_obj.layout == .Auto and block.wantSafety() and
union_ty.unionTagTypeSafety() != null and union_obj.fields.count() > 1)
{
- const enum_ty = union_ty.unionTagTypeHypothetical();
const wanted_tag_val = try Value.Tag.enum_field_index.create(sema.arena, field_index);
- const wanted_tag = try sema.addConstant(enum_ty, wanted_tag_val);
+ const wanted_tag = try sema.addConstant(union_obj.tag_ty, wanted_tag_val);
// TODO would it be better if get_union_tag supported pointers to unions?
const union_val = try block.addTyOp(.load, union_ty, union_ptr);
- const active_tag = try block.addTyOp(.get_union_tag, enum_ty, union_val);
+ const active_tag = try block.addTyOp(.get_union_tag, union_obj.tag_ty, union_val);
const ok = try block.addBinOp(.cmp_eq, active_tag, wanted_tag);
try sema.addSafetyCheck(block, ok, .inactive_union_field);
}
+ if (field.ty.zigTypeTag() == .NoReturn) {
+ _ = try block.addNoOp(.unreach);
+ return Air.Inst.Ref.unreachable_value;
+ }
return block.addStructFieldPtr(union_ptr, field_index, ptr_field_ty);
}
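A sketch of the two `noreturn`-field behaviors added to `unionFieldPtr` (and mirrored in `unionFieldVal` below): initializing such a field is a compile error, and accessing it lowers to `unreach`:

```zig
const U = union(enum) {
    int: u32,
    impossible: noreturn,
};

fn get(u: *U) u32 {
    // `u.impossible = ...` => error: cannot initialize 'noreturn' field of union
    return switch (u.*) {
        .int => |x| x,
        .impossible => unreachable,
    };
}
```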
@@ -21259,9 +21963,10 @@ fn unionFieldVal(
if (union_val.isUndef()) return sema.addConstUndef(field.ty);
const tag_and_val = union_val.castTag(.@"union").?.data;
+ const enum_field_index = union_obj.tag_ty.enumFieldIndex(field_name).?;
var field_tag_buf: Value.Payload.U32 = .{
.base = .{ .tag = .enum_field_index },
- .data = field_index,
+ .data = @intCast(u32, enum_field_index),
};
const field_tag = Value.initPayload(&field_tag_buf.base);
const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, sema.mod);
@@ -21272,7 +21977,7 @@ fn unionFieldVal(
} else {
const msg = msg: {
const active_index = tag_and_val.tag.castTag(.enum_field_index).?.data;
- const active_field_name = union_obj.fields.keys()[active_index];
+ const active_field_name = union_obj.tag_ty.enumFieldName(active_index);
const msg = try sema.errMsg(block, src, "access of union field '{s}' while field '{s}' is active", .{ field_name, active_field_name });
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, union_ty);
@@ -21297,13 +22002,16 @@ fn unionFieldVal(
if (union_obj.layout == .Auto and block.wantSafety() and
union_ty.unionTagTypeSafety() != null and union_obj.fields.count() > 1)
{
- const enum_ty = union_ty.unionTagTypeHypothetical();
const wanted_tag_val = try Value.Tag.enum_field_index.create(sema.arena, field_index);
- const wanted_tag = try sema.addConstant(enum_ty, wanted_tag_val);
- const active_tag = try block.addTyOp(.get_union_tag, enum_ty, union_byval);
+ const wanted_tag = try sema.addConstant(union_obj.tag_ty, wanted_tag_val);
+ const active_tag = try block.addTyOp(.get_union_tag, union_obj.tag_ty, union_byval);
const ok = try block.addBinOp(.cmp_eq, active_tag, wanted_tag);
try sema.addSafetyCheck(block, ok, .inactive_union_field);
}
+ if (field.ty.zigTypeTag() == .NoReturn) {
+ _ = try block.addNoOp(.unreach);
+ return Air.Inst.Ref.unreachable_value;
+ }
return block.addStructFieldVal(union_byval, field_index, field.ty);
}
@@ -21543,8 +22251,7 @@ fn tupleField(
if (try sema.resolveMaybeUndefVal(block, tuple_src, tuple)) |tuple_val| {
if (tuple_val.isUndef()) return sema.addConstUndef(field_ty);
- const field_values = tuple_val.castTag(.aggregate).?.data;
- return sema.addConstant(field_ty, field_values[field_index]);
+ return sema.addConstant(field_ty, tuple_val.fieldValue(tuple_ty, field_index));
}
try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_src);
@@ -21886,7 +22593,7 @@ fn coerceExtra(
// Function body to function pointer.
if (inst_ty.zigTypeTag() == .Fn) {
const fn_val = try sema.resolveConstValue(block, .unneeded, inst, undefined);
- const fn_decl = fn_val.castTag(.function).?.data.owner_decl;
+ const fn_decl = fn_val.pointerDecl().?;
const inst_as_ptr = try sema.analyzeDeclRef(fn_decl);
return sema.coerce(block, dest_ty, inst_as_ptr, inst_src);
}
@@ -21966,7 +22673,6 @@ fn coerceExtra(
.ok => {},
else => break :src_c_ptr,
}
- // TODO add safety check for null pointer
return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
}
@@ -23271,7 +23977,10 @@ fn coerceVarArgParam(
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
const inst_ty = sema.typeOf(inst);
+ if (block.is_typeof) return inst;
+
switch (inst_ty.zigTypeTag()) {
+ // TODO consider casting to c_int/f64 if they fit
.ComptimeInt, .ComptimeFloat => return sema.fail(block, inst_src, "integer and float literals in var args function must be casted", .{}),
else => {},
}
@@ -23653,7 +24362,10 @@ fn beginComptimePtrMutation(
const array_len_including_sentinel =
try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel());
const elems = try arena.alloc(Value, array_len_including_sentinel);
- mem.set(Value, elems, repeated_val);
+ if (elems.len > 0) elems[0] = repeated_val;
+ for (elems[1..]) |*elem| {
+ elem.* = try repeated_val.copy(arena);
+ }
val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
@@ -24439,6 +25151,24 @@ fn coerceCompatiblePtrs(
return sema.addConstant(dest_ty, val);
}
try sema.requireRuntimeBlock(block, inst_src, null);
+ const inst_ty = sema.typeOf(inst);
+    // Note: the trailing `or true` means every operand is currently treated as possibly zero.
+    const inst_allows_zero = (inst_ty.zigTypeTag() == .Pointer and inst_ty.ptrAllowsZero()) or true;
+ if (block.wantSafety() and inst_allows_zero and !dest_ty.ptrAllowsZero() and
+ try sema.typeHasRuntimeBits(block, sema.src, dest_ty.elemType2()))
+ {
+ const actual_ptr = if (inst_ty.isSlice())
+ try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty)
+ else
+ inst;
+ const ptr_int = try block.addUnOp(.ptrtoint, actual_ptr);
+ const is_non_zero = try block.addBinOp(.cmp_neq, ptr_int, .zero_usize);
+ const ok = if (inst_ty.isSlice()) ok: {
+ const len = try sema.analyzeSliceLen(block, inst_src, inst);
+ const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize);
+ break :ok try block.addBinOp(.bit_or, len_zero, is_non_zero);
+ } else is_non_zero;
+ try sema.addSafetyCheck(block, ok, .cast_to_null);
+ }
return sema.bitCast(block, dest_ty, inst, inst_src);
}
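This implements the safety check whose TODO is removed from `coerceExtra` above: coercing a possibly-zero pointer (e.g. a C pointer) to a non-allowzero pointer now emits a runtime null check, with a zero-length slice tolerated. A sketch:

```zig
pub fn main() void {
    var cp: [*c]u32 = null;
    // Runtime safety: panics with "cast causes pointer to be null".
    const p: *u32 = cp;
    _ = p;
}
```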
@@ -24467,8 +25197,7 @@ fn coerceEnumToUnion(
const enum_tag = try sema.coerce(block, tag_ty, inst, inst_src);
if (try sema.resolveDefinedValue(block, inst_src, enum_tag)) |val| {
- const union_obj = union_ty.cast(Type.Payload.Union).?.data;
- const field_index = union_obj.tag_ty.enumTagFieldIndex(val, sema.mod) orelse {
+ const field_index = union_ty.unionTagFieldIndex(val, sema.mod) orelse {
const msg = msg: {
const msg = try sema.errMsg(block, inst_src, "union '{}' has no tag with value '{}'", .{
union_ty.fmt(sema.mod), val.fmtValue(tag_ty, sema.mod),
@@ -24479,8 +25208,22 @@ fn coerceEnumToUnion(
};
return sema.failWithOwnedErrorMsg(msg);
};
+
+ const union_obj = union_ty.cast(Type.Payload.Union).?.data;
const field = union_obj.fields.values()[field_index];
const field_ty = try sema.resolveTypeFields(block, inst_src, field.ty);
+ if (field_ty.zigTypeTag() == .NoReturn) {
+ const msg = msg: {
+ const msg = try sema.errMsg(block, inst_src, "cannot initialize 'noreturn' field of union", .{});
+ errdefer msg.destroy(sema.gpa);
+
+ const field_name = union_obj.fields.keys()[field_index];
+ try sema.addFieldErrNote(block, union_ty, field_index, msg, "field '{s}' declared here", .{field_name});
+ try sema.addDeclaredHereNote(msg, union_ty);
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
+ }
const opv = (try sema.typeHasOnePossibleValue(block, inst_src, field_ty)) orelse {
const msg = msg: {
const field_name = union_obj.fields.keys()[field_index];
@@ -24516,13 +25259,37 @@ fn coerceEnumToUnion(
return sema.failWithOwnedErrorMsg(msg);
}
+ const union_obj = union_ty.cast(Type.Payload.Union).?.data;
+ {
+ var msg: ?*Module.ErrorMsg = null;
+ errdefer if (msg) |some| some.destroy(sema.gpa);
+
+ for (union_obj.fields.values()) |field, i| {
+ if (field.ty.zigTypeTag() == .NoReturn) {
+ const err_msg = msg orelse try sema.errMsg(
+ block,
+ inst_src,
+ "runtime coercion from enum '{}' to union '{}' which has a 'noreturn' field",
+ .{ tag_ty.fmt(sema.mod), union_ty.fmt(sema.mod) },
+ );
+ msg = err_msg;
+
+ try sema.addFieldErrNote(block, union_ty, i, err_msg, "'noreturn' field here", .{});
+ }
+ }
+ if (msg) |some| {
+ msg = null;
+ try sema.addDeclaredHereNote(some, union_ty);
+ return sema.failWithOwnedErrorMsg(some);
+ }
+ }
+
// If the union has all fields 0 bits, the union value is just the enum value.
if (union_ty.unionHasAllZeroBitFieldTypes()) {
return block.addBitCast(union_ty, enum_tag);
}
const msg = msg: {
- const union_obj = union_ty.cast(Type.Payload.Union).?.data;
const msg = try sema.errMsg(
block,
inst_src,
@@ -24533,11 +25300,11 @@ fn coerceEnumToUnion(
var it = union_obj.fields.iterator();
var field_index: usize = 0;
- while (it.next()) |field| {
+ while (it.next()) |field| : (field_index += 1) {
const field_name = field.key_ptr.*;
const field_ty = field.value_ptr.ty;
+ if (!field_ty.hasRuntimeBits()) continue;
try sema.addFieldErrNote(block, union_ty, field_index, msg, "field '{s}' has type '{}'", .{ field_name, field_ty.fmt(sema.mod) });
- field_index += 1;
}
try sema.addDeclaredHereNote(msg, union_ty);
break :msg msg;
@@ -24840,6 +25607,7 @@ fn coerceTupleToStruct(
// Populate default field values and report errors for missing fields.
var root_msg: ?*Module.ErrorMsg = null;
+ errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
for (field_refs) |*field_ref, i| {
if (field_ref.* != .none) continue;
@@ -24865,6 +25633,7 @@ fn coerceTupleToStruct(
}
if (root_msg) |msg| {
+ root_msg = null;
try sema.addDeclaredHereNote(msg, struct_ty);
return sema.failWithOwnedErrorMsg(msg);
}
@@ -24934,6 +25703,7 @@ fn coerceTupleToTuple(
// Populate default field values and report errors for missing fields.
var root_msg: ?*Module.ErrorMsg = null;
+ errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
for (field_refs) |*field_ref, i| {
if (field_ref.* != .none) continue;
@@ -24969,6 +25739,7 @@ fn coerceTupleToTuple(
}
if (root_msg) |msg| {
+ root_msg = null;
try sema.addDeclaredHereNote(msg, tuple_ty);
return sema.failWithOwnedErrorMsg(msg);
}
@@ -25207,11 +25978,38 @@ fn analyzeIsNull(
return Air.Inst.Ref.bool_false;
}
}
+
+ const operand_ty = sema.typeOf(operand);
+ var buf: Type.Payload.ElemType = undefined;
+ if (operand_ty.zigTypeTag() == .Optional and operand_ty.optionalChild(&buf).zigTypeTag() == .NoReturn) {
+ return Air.Inst.Ref.bool_true;
+ }
try sema.requireRuntimeBlock(block, src, null);
const air_tag: Air.Inst.Tag = if (invert_logic) .is_non_null else .is_null;
return block.addUnOp(air_tag, operand);
}
+fn analyzePtrIsNonErrComptimeOnly(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ operand: Air.Inst.Ref,
+) CompileError!Air.Inst.Ref {
+ const ptr_ty = sema.typeOf(operand);
+ assert(ptr_ty.zigTypeTag() == .Pointer);
+ const child_ty = ptr_ty.childType();
+
+ const child_tag = child_ty.zigTypeTag();
+ if (child_tag != .ErrorSet and child_tag != .ErrorUnion) return Air.Inst.Ref.bool_true;
+ if (child_tag == .ErrorSet) return Air.Inst.Ref.bool_false;
+ assert(child_tag == .ErrorUnion);
+
+    _ = block;
+    _ = src;
+
+    // The error-union case cannot be decided without runtime information;
+    // `.none` tells the caller to emit a runtime `is_non_err_ptr` check.
+    return Air.Inst.Ref.none;
+}
+
fn analyzeIsNonErrComptimeOnly(
sema: *Sema,
block: *Block,
@@ -25224,11 +26022,22 @@ fn analyzeIsNonErrComptimeOnly(
if (ot == .ErrorSet) return Air.Inst.Ref.bool_false;
assert(ot == .ErrorUnion);
+ const payload_ty = operand_ty.errorUnionPayload();
+ if (payload_ty.zigTypeTag() == .NoReturn) {
+ return Air.Inst.Ref.bool_false;
+ }
+
if (Air.refToIndex(operand)) |operand_inst| {
- const air_tags = sema.air_instructions.items(.tag);
- if (air_tags[operand_inst] == .wrap_errunion_payload) {
- return Air.Inst.Ref.bool_true;
+ switch (sema.air_instructions.items(.tag)[operand_inst]) {
+ .wrap_errunion_payload => return Air.Inst.Ref.bool_true,
+ .wrap_errunion_err => return Air.Inst.Ref.bool_false,
+ else => {},
}
+ } else if (operand == .undef) {
+ return sema.addConstUndef(Type.bool);
+ } else {
+ // None of the ref tags can be errors.
+ return Air.Inst.Ref.bool_true;
}
const maybe_operand_val = try sema.resolveMaybeUndefVal(block, src, operand);
@@ -25304,6 +26113,21 @@ fn analyzeIsNonErr(
}
}
+fn analyzePtrIsNonErr(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ operand: Air.Inst.Ref,
+) CompileError!Air.Inst.Ref {
+ const result = try sema.analyzePtrIsNonErrComptimeOnly(block, src, operand);
+ if (result == .none) {
+ try sema.requireRuntimeBlock(block, src, null);
+ return block.addUnOp(.is_non_err_ptr, operand);
+ } else {
+ return result;
+ }
+}
+
fn analyzeSlice(
sema: *Sema,
block: *Block,
@@ -25330,11 +26154,12 @@ fn analyzeSlice(
var array_ty = ptr_ptr_child_ty;
var slice_ty = ptr_ptr_ty;
var ptr_or_slice = ptr_ptr;
- var elem_ty = ptr_ptr_child_ty.childType();
+ var elem_ty: Type = undefined;
var ptr_sentinel: ?Value = null;
switch (ptr_ptr_child_ty.zigTypeTag()) {
.Array => {
ptr_sentinel = ptr_ptr_child_ty.sentinel();
+ elem_ty = ptr_ptr_child_ty.childType();
},
.Pointer => switch (ptr_ptr_child_ty.ptrSize()) {
.One => {
@@ -25578,6 +26403,27 @@ fn analyzeSlice(
const new_ptr_val = opt_new_ptr_val orelse {
const result = try block.addBitCast(return_ty, new_ptr);
if (block.wantSafety()) {
+            // requirement: a sliced C pointer is non-null
+ if (ptr_ptr_child_ty.isCPtr()) {
+ const is_non_null = try sema.analyzeIsNull(block, ptr_src, ptr, true);
+ try sema.addSafetyCheck(block, is_non_null, .unwrap_null);
+ }
+
+ if (slice_ty.isSlice()) {
+ const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice);
+ const actual_len = if (slice_ty.sentinel() == null)
+ slice_len_inst
+ else
+ try sema.analyzeArithmetic(block, .add, slice_len_inst, .one, src, end_src, end_src);
+
+ const actual_end = if (slice_sentinel != null)
+ try sema.analyzeArithmetic(block, .add, end, .one, src, end_src, end_src)
+ else
+ end;
+
+ try sema.panicIndexOutOfBounds(block, src, actual_end, actual_len, .cmp_lte);
+ }
+
// requirement: result[new_len] == slice_sentinel
try sema.panicSentinelMismatch(block, src, slice_sentinel, elem_ty, result, new_len);
}
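A sketch of the two checks added in this branch: slicing through a C pointer asserts it is non-null, and a sentinel-terminated slice checks `end + 1` against the source length so the sentinel read itself stays in bounds:

```zig
fn firstThree(p: [*c]u8) []u8 {
    // Runtime safety: panics with "attempt to use null value" when p is null.
    return p[0..3];
}
```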
@@ -25639,7 +26485,11 @@ fn analyzeSlice(
break :blk try sema.analyzeArithmetic(block, .add, slice_len_inst, .one, src, end_src, end_src);
} else null;
if (opt_len_inst) |len_inst| {
- try sema.panicIndexOutOfBounds(block, src, end, len_inst, .cmp_lte);
+ const actual_end = if (slice_sentinel != null)
+ try sema.analyzeArithmetic(block, .add, end, .one, src, end_src, end_src)
+ else
+ end;
+ try sema.panicIndexOutOfBounds(block, src, actual_end, len_inst, .cmp_lte);
}
// requirement: start <= end
@@ -26616,9 +27466,6 @@ pub fn resolveTypeLayout(
src: LazySrcLoc,
ty: Type,
) CompileError!void {
- if (build_options.omit_stage2)
- @panic("sadly stage2 is omitted from this build to save memory on the CI server");
-
switch (ty.zigTypeTag()) {
.Struct => return sema.resolveStructLayout(block, src, ty),
.Union => return sema.resolveUnionLayout(block, src, ty),
@@ -26677,6 +27524,11 @@ fn resolveStructLayout(
else => return err,
};
}
+
+ if (struct_obj.layout == .Packed) {
+ try semaBackingIntType(sema.mod, struct_obj);
+ }
+
struct_obj.status = .have_layout;
// In case of querying the ABI alignment of this struct, we will ask
@@ -26696,6 +27548,109 @@ fn resolveStructLayout(
// otherwise it's a tuple; no need to resolve anything
}
+fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!void {
+ const gpa = mod.gpa;
+ const target = mod.getTarget();
+
+ var fields_bit_sum: u64 = 0;
+ for (struct_obj.fields.values()) |field| {
+ fields_bit_sum += field.ty.bitSize(target);
+ }
+
+ const decl_index = struct_obj.owner_decl;
+ const decl = mod.declPtr(decl_index);
+ var decl_arena = decl.value_arena.?.promote(gpa);
+ defer decl.value_arena.?.* = decl_arena.state;
+ const decl_arena_allocator = decl_arena.allocator();
+
+ const zir = struct_obj.namespace.file_scope.zir;
+ const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended;
+ assert(extended.opcode == .struct_decl);
+ const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);
+
+ if (small.has_backing_int) {
+ var extra_index: usize = extended.operand;
+ extra_index += @boolToInt(small.has_src_node);
+ extra_index += @boolToInt(small.has_fields_len);
+ extra_index += @boolToInt(small.has_decls_len);
+
+ const backing_int_body_len = zir.extra[extra_index];
+ extra_index += 1;
+
+ var analysis_arena = std.heap.ArenaAllocator.init(gpa);
+ defer analysis_arena.deinit();
+
+ var sema: Sema = .{
+ .mod = mod,
+ .gpa = gpa,
+ .arena = analysis_arena.allocator(),
+ .perm_arena = decl_arena_allocator,
+ .code = zir,
+ .owner_decl = decl,
+ .owner_decl_index = decl_index,
+ .func = null,
+ .fn_ret_ty = Type.void,
+ .owner_func = null,
+ };
+ defer sema.deinit();
+
+ var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope);
+ defer wip_captures.deinit();
+
+ var block: Block = .{
+ .parent = null,
+ .sema = &sema,
+ .src_decl = decl_index,
+ .namespace = &struct_obj.namespace,
+ .wip_capture_scope = wip_captures.scope,
+ .instructions = .{},
+ .inlining = null,
+ .is_comptime = true,
+ };
+ defer {
+ assert(block.instructions.items.len == 0);
+ block.params.deinit(gpa);
+ }
+
+ const backing_int_src: LazySrcLoc = .{ .node_offset_container_tag = 0 };
+ const backing_int_ty = blk: {
+ if (backing_int_body_len == 0) {
+ const backing_int_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
+ break :blk try sema.resolveType(&block, backing_int_src, backing_int_ref);
+ } else {
+ const body = zir.extra[extra_index..][0..backing_int_body_len];
+ const ty_ref = try sema.resolveBody(&block, body, struct_obj.zir_index);
+ break :blk try sema.analyzeAsType(&block, backing_int_src, ty_ref);
+ }
+ };
+
+ try sema.checkBackingIntType(&block, backing_int_src, backing_int_ty, fields_bit_sum);
+ struct_obj.backing_int_ty = try backing_int_ty.copy(decl_arena_allocator);
+ } else {
+ var buf: Type.Payload.Bits = .{
+ .base = .{ .tag = .int_unsigned },
+ .data = @intCast(u16, fields_bit_sum),
+ };
+ struct_obj.backing_int_ty = try Type.initPayload(&buf.base).copy(decl_arena_allocator);
+ }
+}
+
+fn checkBackingIntType(sema: *Sema, block: *Block, src: LazySrcLoc, backing_int_ty: Type, fields_bit_sum: u64) CompileError!void {
+ const target = sema.mod.getTarget();
+
+ if (!backing_int_ty.isInt()) {
+ return sema.fail(block, src, "expected backing integer type, found '{}'", .{backing_int_ty.fmt(sema.mod)});
+ }
+ if (backing_int_ty.bitSize(target) != fields_bit_sum) {
+ return sema.fail(
+ block,
+ src,
+ "backing integer type '{}' has bit size {} but the struct fields have a total bit size of {}",
+ .{ backing_int_ty.fmt(sema.mod), backing_int_ty.bitSize(target), fields_bit_sum },
+ );
+ }
+}
+
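`semaBackingIntType` resolves the new explicit backing integer of source-level packed structs (running the ZIR body when the annotation is an expression), and `checkBackingIntType` enforces the exact width match. The user-facing contract, sketched:

```zig
const Reg = packed struct(u32) {
    lo: u16,
    hi: u16,
};

// packed struct(u16) { lo: u16, hi: u16 } is rejected:
// backing integer type 'u16' has bit size 16 but the struct fields
// have a total bit size of 32
```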
fn resolveUnionLayout(
sema: *Sema,
block: *Block,
@@ -26849,8 +27804,6 @@ fn resolveUnionFully(
}
pub fn resolveTypeFields(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!Type {
- if (build_options.omit_stage2)
- @panic("sadly stage2 is omitted from this build to save memory on the CI server");
switch (ty.tag()) {
.@"struct" => {
const struct_obj = ty.castTag(.@"struct").?.data;
@@ -26997,13 +27950,15 @@ fn resolveInferredErrorSetTy(
fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void {
const gpa = mod.gpa;
const decl_index = struct_obj.owner_decl;
- const zir = struct_obj.namespace.file_scope.zir;
+ const file_scope = struct_obj.namespace.file_scope;
+ if (file_scope.status != .success_zir) return error.AnalysisFail;
+ const zir = file_scope.zir;
const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended;
assert(extended.opcode == .struct_decl);
const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);
var extra_index: usize = extended.operand;
- const src = LazySrcLoc.nodeOffset(struct_obj.node_offset);
+ const src = LazySrcLoc.nodeOffset(0);
extra_index += @boolToInt(small.has_src_node);
const fields_len = if (small.has_fields_len) blk: {
@@ -27018,12 +27973,26 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
break :decls_len decls_len;
} else 0;
+    // The backing integer needs resolved field types, so it is handled in
+    // `resolveStructLayout()`; here we only skip past its ZIR.
+ if (small.has_backing_int) {
+ const backing_int_body_len = zir.extra[extra_index];
+ extra_index += 1; // backing_int_body_len
+ if (backing_int_body_len == 0) {
+ extra_index += 1; // backing_int_ref
+ } else {
+ extra_index += backing_int_body_len; // backing_int_body_inst
+ }
+ }
+
// Skip over decls.
var decls_it = zir.declIteratorInner(extra_index, decls_len);
while (decls_it.next()) |_| {}
extra_index = decls_it.extra_index;
if (fields_len == 0) {
+ if (struct_obj.layout == .Packed) {
+ try semaBackingIntType(mod, struct_obj);
+ }
struct_obj.status = .have_layout;
return;
}
@@ -27122,12 +28091,12 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
if (gop.found_existing) {
const msg = msg: {
const tree = try sema.getAstTree(&block_scope);
- const field_src = enumFieldSrcLoc(decl, tree.*, struct_obj.node_offset, field_i);
+ const field_src = enumFieldSrcLoc(decl, tree.*, 0, field_i);
const msg = try sema.errMsg(&block_scope, field_src, "duplicate struct field: '{s}'", .{field_name});
errdefer msg.destroy(gpa);
const prev_field_index = struct_obj.fields.getIndex(field_name).?;
- const prev_field_src = enumFieldSrcLoc(decl, tree.*, struct_obj.node_offset, prev_field_index);
+ const prev_field_src = enumFieldSrcLoc(decl, tree.*, 0, prev_field_index);
try sema.mod.errNoteNonLazy(prev_field_src.toSrcLoc(decl), msg, "other field here", .{});
try sema.errNote(&block_scope, src, msg, "struct declared here", .{});
break :msg msg;
@@ -27184,7 +28153,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
if (field_ty.zigTypeTag() == .Opaque) {
const msg = msg: {
const tree = try sema.getAstTree(&block_scope);
- const field_src = enumFieldSrcLoc(decl, tree.*, struct_obj.node_offset, i);
+ const field_src = enumFieldSrcLoc(decl, tree.*, 0, i);
const msg = try sema.errMsg(&block_scope, field_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
errdefer msg.destroy(sema.gpa);
@@ -27193,10 +28162,22 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
};
return sema.failWithOwnedErrorMsg(msg);
}
+ if (field_ty.zigTypeTag() == .NoReturn) {
+ const msg = msg: {
+ const tree = try sema.getAstTree(&block_scope);
+ const field_src = enumFieldSrcLoc(decl, tree.*, 0, i);
+ const msg = try sema.errMsg(&block_scope, field_src, "struct fields cannot be 'noreturn'", .{});
+ errdefer msg.destroy(sema.gpa);
+
+ try sema.addDeclaredHereNote(msg, field_ty);
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
+ }
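A sketch of the new rejection (the note points at the field's source location):

```zig
const S = struct {
    x: noreturn, // error: struct fields cannot be 'noreturn'
};
```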
if (struct_obj.layout == .Extern and !sema.validateExternType(field.ty, .other)) {
const msg = msg: {
const tree = try sema.getAstTree(&block_scope);
- const fields_src = enumFieldSrcLoc(decl, tree.*, struct_obj.node_offset, i);
+ const fields_src = enumFieldSrcLoc(decl, tree.*, 0, i);
const msg = try sema.errMsg(&block_scope, fields_src, "extern structs cannot contain fields of type '{}'", .{field.ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
@@ -27209,7 +28190,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
} else if (struct_obj.layout == .Packed and !(validatePackedType(field.ty))) {
const msg = msg: {
const tree = try sema.getAstTree(&block_scope);
- const fields_src = enumFieldSrcLoc(decl, tree.*, struct_obj.node_offset, i);
+ const fields_src = enumFieldSrcLoc(decl, tree.*, 0, i);
const msg = try sema.errMsg(&block_scope, fields_src, "packed structs cannot contain fields of type '{}'", .{field.ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
@@ -27266,7 +28247,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
const small = @bitCast(Zir.Inst.UnionDecl.Small, extended.small);
var extra_index: usize = extended.operand;
- const src = LazySrcLoc.nodeOffset(union_obj.node_offset);
+ const src = LazySrcLoc.nodeOffset(0);
extra_index += @boolToInt(small.has_src_node);
const tag_type_ref: Zir.Inst.Ref = if (small.has_tag_type) blk: {
@@ -27299,10 +28280,6 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
extra_index = decls_it.extra_index;
const body = zir.extra[extra_index..][0..body_len];
- if (fields_len == 0) {
- assert(body.len == 0);
- return;
- }
extra_index += body.len;
const decl = mod.declPtr(decl_index);
@@ -27390,6 +28367,10 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
enum_field_names = &union_obj.tag_ty.castTag(.enum_simple).?.data.fields;
}
+ if (fields_len == 0) {
+ return;
+ }
+
const bits_per_field = 4;
const fields_per_u32 = 32 / bits_per_field;
const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable;
@@ -27490,12 +28471,12 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
if (gop.found_existing) {
const msg = msg: {
const tree = try sema.getAstTree(&block_scope);
- const field_src = enumFieldSrcLoc(decl, tree.*, union_obj.node_offset, field_i);
+ const field_src = enumFieldSrcLoc(decl, tree.*, 0, field_i);
const msg = try sema.errMsg(&block_scope, field_src, "duplicate union field: '{s}'", .{field_name});
errdefer msg.destroy(gpa);
const prev_field_index = union_obj.fields.getIndex(field_name).?;
- const prev_field_src = enumFieldSrcLoc(decl, tree.*, union_obj.node_offset, prev_field_index);
+ const prev_field_src = enumFieldSrcLoc(decl, tree.*, 0, prev_field_index);
try sema.mod.errNoteNonLazy(prev_field_src.toSrcLoc(decl), msg, "other field here", .{});
try sema.errNote(&block_scope, src, msg, "union declared here", .{});
break :msg msg;
@@ -27508,7 +28489,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
if (!enum_has_field) {
const msg = msg: {
const tree = try sema.getAstTree(&block_scope);
- const field_src = enumFieldSrcLoc(decl, tree.*, union_obj.node_offset, field_i);
+ const field_src = enumFieldSrcLoc(decl, tree.*, 0, field_i);
const msg = try sema.errMsg(&block_scope, field_src, "no field named '{s}' in enum '{}'", .{ field_name, union_obj.tag_ty.fmt(sema.mod) });
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, union_obj.tag_ty);
@@ -27521,7 +28502,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
if (field_ty.zigTypeTag() == .Opaque) {
const msg = msg: {
const tree = try sema.getAstTree(&block_scope);
- const field_src = enumFieldSrcLoc(decl, tree.*, union_obj.node_offset, field_i);
+ const field_src = enumFieldSrcLoc(decl, tree.*, 0, field_i);
const msg = try sema.errMsg(&block_scope, field_src, "opaque types have unknown size and therefore cannot be directly embedded in unions", .{});
errdefer msg.destroy(sema.gpa);
@@ -27533,7 +28514,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
if (union_obj.layout == .Extern and !sema.validateExternType(field_ty, .union_field)) {
const msg = msg: {
const tree = try sema.getAstTree(&block_scope);
- const field_src = enumFieldSrcLoc(decl, tree.*, union_obj.node_offset, field_i);
+ const field_src = enumFieldSrcLoc(decl, tree.*, 0, field_i);
const msg = try sema.errMsg(&block_scope, field_src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
@@ -27546,7 +28527,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
} else if (union_obj.layout == .Packed and !(validatePackedType(field_ty))) {
const msg = msg: {
const tree = try sema.getAstTree(&block_scope);
- const fields_src = enumFieldSrcLoc(decl, tree.*, union_obj.node_offset, field_i);
+ const fields_src = enumFieldSrcLoc(decl, tree.*, 0, field_i);
const msg = try sema.errMsg(&block_scope, fields_src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
@@ -27638,7 +28619,6 @@ fn generateUnionTagTypeNumbered(
.tag_ty = int_ty,
.fields = .{},
.values = .{},
- .node_offset = 0,
};
// Here we pre-allocate the maps using the decl arena.
try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
@@ -27696,7 +28676,6 @@ fn generateUnionTagTypeSimple(sema: *Sema, block: *Block, fields_len: usize, may
enum_obj.* = .{
.owner_decl = new_decl_index,
.fields = .{},
- .node_offset = 0,
};
// Here we pre-allocate the maps using the decl arena.
try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
@@ -27871,10 +28850,11 @@ pub fn typeHasOnePossibleValue(
.tuple, .anon_struct => {
const tuple = ty.tupleFields();
- for (tuple.values) |val| {
- if (val.tag() == .unreachable_value) {
- return null; // non-comptime field
- }
+ for (tuple.values) |val, i| {
+ const is_comptime = val.tag() != .unreachable_value;
+ if (is_comptime) continue;
+ if ((try sema.typeHasOnePossibleValue(block, src, tuple.types[i])) != null) continue;
+ return null;
}
return Value.initTag(.empty_struct_value);
},
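A minimal illustration of the rule the loop above implements (the tuple type here is made up for the example, and `std.meta.Tuple` usage is assumed): a tuple has one possible value when every field is either comptime or has a field type that is itself single-valued.

const std = @import("std");

test "tuple of single-valued field types has one possible value" {
    // u0 and void each have exactly one possible value, so the tuple does too
    // and it needs no runtime representation.
    const T = std.meta.Tuple(&[_]type{ u0, void });
    try std.testing.expectEqual(@as(usize, 0), @sizeOf(T));
}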
@@ -27882,6 +28862,10 @@ pub fn typeHasOnePossibleValue(
.enum_numbered => {
const resolved_ty = try sema.resolveTypeFields(block, src, ty);
const enum_obj = resolved_ty.castTag(.enum_numbered).?.data;
+ // An explicit tag type is always provided for enum_numbered.
+ if (enum_obj.tag_ty.hasRuntimeBits()) {
+ return null;
+ }
if (enum_obj.fields.count() == 1) {
if (enum_obj.values.count() == 0) {
return Value.zero; // auto-numbered
@@ -27895,6 +28879,9 @@ pub fn typeHasOnePossibleValue(
.enum_full => {
const resolved_ty = try sema.resolveTypeFields(block, src, ty);
const enum_obj = resolved_ty.castTag(.enum_full).?.data;
+ if (enum_obj.tag_ty.hasRuntimeBits()) {
+ return null;
+ }
if (enum_obj.fields.count() == 1) {
if (enum_obj.values.count() == 0) {
return Value.zero; // auto-numbered
@@ -27927,7 +28914,9 @@ pub fn typeHasOnePossibleValue(
const union_obj = resolved_ty.cast(Type.Payload.Union).?.data;
const tag_val = (try sema.typeHasOnePossibleValue(block, src, union_obj.tag_ty)) orelse
return null;
- const only_field = union_obj.fields.values()[0];
+ const fields = union_obj.fields.values();
+ if (fields.len == 0) return Value.initTag(.empty_struct_value);
+ const only_field = fields[0];
if (only_field.ty.eql(resolved_ty, sema.mod)) {
const msg = try Module.ErrorMsg.create(
sema.gpa,
@@ -28006,8 +28995,18 @@ fn enumFieldSrcLoc(
.container_decl_arg_trailing,
=> tree.containerDeclArg(enum_node),
+ .tagged_union,
+ .tagged_union_trailing,
+ => tree.taggedUnion(enum_node),
+ .tagged_union_two,
+ .tagged_union_two_trailing,
+ => tree.taggedUnionTwo(&buffer, enum_node),
+ .tagged_union_enum_tag,
+ .tagged_union_enum_tag_trailing,
+ => tree.taggedUnionEnumTag(enum_node),
+
// Container was constructed with `@Type`.
- else => return LazySrcLoc.nodeOffset(node_offset),
+ else => return LazySrcLoc.nodeOffset(0),
};
var it_index: usize = 0;
for (container_decl.ast.members) |member_node| {
@@ -28437,8 +29436,6 @@ fn typePtrOrOptionalPtrTy(
/// TODO merge these implementations together with the "advanced"/sema_kit pattern seen
/// elsewhere in value.zig
pub fn typeRequiresComptime(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool {
- if (build_options.omit_stage2)
- @panic("sadly stage2 is omitted from this build to save memory on the CI server");
return switch (ty.tag()) {
.u1,
.u8,
@@ -28543,7 +29540,7 @@ pub fn typeRequiresComptime(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Typ
=> {
const child_ty = ty.childType();
if (child_ty.zigTypeTag() == .Fn) {
- return false;
+ return child_ty.fnInfo().is_generic;
} else {
return sema.typeRequiresComptime(block, src, child_ty);
}
@@ -28656,7 +29653,9 @@ fn unionFieldAlignment(
src: LazySrcLoc,
field: Module.Union.Field,
) !u32 {
- if (field.abi_align == 0) {
+ if (field.ty.zigTypeTag() == .NoReturn) {
+ return @as(u32, 0);
+ } else if (field.abi_align == 0) {
return sema.typeAbiAlignment(block, src, field.ty);
} else {
return field.abi_align;
@@ -29430,7 +30429,7 @@ fn valuesEqual(
rhs: Value,
ty: Type,
) CompileError!bool {
- return Value.eqlAdvanced(lhs, rhs, ty, sema.mod, sema.kit(block, src));
+ return Value.eqlAdvanced(lhs, ty, rhs, ty, sema.mod, sema.kit(block, src));
}
/// Asserts the values are comparable vectors of type `ty`.
@@ -29478,7 +30477,7 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
        // The resulting pointer is aligned to the gcd of the offset (an
        // arbitrary number) and the alignment factor (always a power of two,
        // non-zero).
- const new_align = @as(u32, 1) << @intCast(u5, @ctz(u64, addend | ptr_info.@"align"));
+ const new_align = @as(u32, 1) << @intCast(u5, @ctz(addend | ptr_info.@"align"));
break :a new_align;
};
return try Type.ptr(sema.arena, sema.mod, .{
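A worked example of the alignment computation above, with illustrative values:

const std = @import("std");

test "element pointer alignment is the gcd of offset and alignment" {
    const addend: u64 = 12; // arbitrary byte offset (0b1100)
    const alignment: u64 = 8; // power of two, non-zero (0b1000)
    // @ctz(12 | 8) == 2, so new_align == 1 << 2 == 4 == gcd(12, 8).
    const new_align = @as(u32, 1) << @intCast(u5, @ctz(addend | alignment));
    try std.testing.expectEqual(@as(u32, 4), new_align);
}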
diff --git a/src/Zir.zig b/src/Zir.zig
index 3aa2378697..ec9ddfcffb 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -43,7 +43,11 @@ pub const Header = extern struct {
instructions_len: u32,
string_bytes_len: u32,
extra_len: u32,
-
+ /// We could leave this as padding; however, that triggers a Valgrind warning
+ /// because we read and write undefined bytes to the file system. This is
+ /// harmless, but a zeroed field here is essentially free, silences the warning,
+ /// and makes subsequent Valgrind warnings more likely to be taken seriously.
+ unused: u32 = 0,
stat_inode: std.fs.File.INode,
stat_size: u64,
stat_mtime: i128,
@@ -490,14 +494,6 @@ pub const Inst = struct {
/// Merge two error sets into one, `E1 || E2`.
/// Uses the `pl_node` field with payload `Bin`.
merge_error_sets,
- /// Given a reference to a function and a parameter index, returns the
- /// type of the parameter. The only usage of this instruction is for the
- /// result location of parameters of function calls. In the case of a function's
- /// parameter type being `anytype`, it is the type coercion's job to detect this
- /// scenario and skip the coercion, so that semantic analysis of this instruction
- /// is not in a position where it must create an invalid type.
- /// Uses the `param_type` union field.
- param_type,
/// Turns an R-Value into a const L-Value. In other words, it takes a value,
/// stores it in a memory location, and returns a const pointer to it. If the value
/// is `comptime`, the memory location is global static constant data. Otherwise,
@@ -839,8 +835,6 @@ pub const Inst = struct {
round,
/// Implement builtin `@tagName`. Uses `un_node`.
tag_name,
- /// Implement builtin `@Type`. Uses `un_node`.
- reify,
/// Implement builtin `@typeName`. Uses `un_node`.
type_name,
/// Implement builtin `@Frame`. Uses `un_node`.
@@ -1097,7 +1091,6 @@ pub const Inst = struct {
.mul,
.mulwrap,
.mul_sat,
- .param_type,
.ref,
.shl,
.shl_sat,
@@ -1197,7 +1190,6 @@ pub const Inst = struct {
.trunc,
.round,
.tag_name,
- .reify,
.type_name,
.frame_type,
.frame_size,
@@ -1400,7 +1392,6 @@ pub const Inst = struct {
.mul,
.mulwrap,
.mul_sat,
- .param_type,
.ref,
.shl,
.shl_sat,
@@ -1484,7 +1475,6 @@ pub const Inst = struct {
.trunc,
.round,
.tag_name,
- .reify,
.type_name,
.frame_type,
.frame_size,
@@ -1573,7 +1563,6 @@ pub const Inst = struct {
.mulwrap = .pl_node,
.mul_sat = .pl_node,
- .param_type = .param_type,
.param = .pl_tok,
.param_comptime = .pl_tok,
.param_anytype = .str_tok,
@@ -1759,7 +1748,6 @@ pub const Inst = struct {
.trunc = .un_node,
.round = .un_node,
.tag_name = .un_node,
- .reify = .un_node,
.type_name = .un_node,
.frame_type = .un_node,
.frame_size = .un_node,
@@ -1980,6 +1968,10 @@ pub const Inst = struct {
/// Implement builtin `@intToError`.
/// `operand` is payload index to `UnNode`.
int_to_error,
+ /// Implement builtin `@Type`.
+ /// `operand` is payload index to `UnNode`.
+ /// `small` contains `NameStrategy`.
+ reify,
pub const InstData = struct {
opcode: Extended,
@@ -2541,10 +2533,6 @@ pub const Inst = struct {
/// Points to a `Block`.
payload_index: u32,
},
- param_type: struct {
- callee: Ref,
- param_index: u32,
- },
@"unreachable": struct {
/// Offset from Decl AST node index.
/// `Tag` determines which kind of AST node this points to.
@@ -2615,7 +2603,6 @@ pub const Inst = struct {
ptr_type,
int_type,
bool_br,
- param_type,
@"unreachable",
@"break",
switch_capture,
@@ -2795,7 +2782,9 @@ pub const Inst = struct {
};
/// Stored inside extra, with trailing arguments according to `args_len`.
- /// Each argument is a `Ref`.
+ /// Implicit 0. arg_0_start: u32, // always the same as `args_len`
+ /// 1. arg_end: u32, // for each `args_len`
+ /// arg_N_start is the same as arg_(N-1)_end
pub const Call = struct {
// Note: Flags *must* come first so that unusedResultExpr
// can find it when it goes to modify them.
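A sketch of decoding the trailing argument bodies documented above (not part of the patch; it assumes this lives in Zir.zig where `Zir` is `@This()`, and `trailing_start` and `args_len` are caller-supplied):

fn iterateCallArgs(zir: Zir, trailing_start: usize, args_len: u32) void {
    const args_body = zir.extra[trailing_start..];
    var arg_start: u32 = args_len; // the implicit arg_0_start
    for (args_body[0..args_len]) |arg_end, i| {
        // Each body spans from the previous end offset to this one.
        const arg_body = args_body[arg_start..arg_end];
        _ = arg_body; // the ZIR body of argument i
        _ = i;
        arg_start = arg_end;
    }
}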
@@ -3100,13 +3089,16 @@ pub const Inst = struct {
/// 0. src_node: i32, // if has_src_node
/// 1. fields_len: u32, // if has_fields_len
/// 2. decls_len: u32, // if has_decls_len
- /// 3. decl_bits: u32 // for every 8 decls
+ /// 3. backing_int_body_len: u32, // if has_backing_int
+ /// 4. backing_int_ref: Ref, // if has_backing_int and backing_int_body_len is 0
+ /// 5. backing_int_body_inst: Inst, // if has_backing_int and backing_int_body_len is > 0
+ /// 6. decl_bits: u32 // for every 8 decls
/// - sets of 4 bits:
/// 0b000X: whether corresponding decl is pub
/// 0b00X0: whether corresponding decl is exported
/// 0b0X00: whether corresponding decl has an align expression
/// 0bX000: whether corresponding decl has a linksection or an address space expression
- /// 4. decl: { // for every decls_len
+ /// 7. decl: { // for every decls_len
/// src_hash: [4]u32, // hash of source bytes
/// line: u32, // line number of decl, relative to parent
/// name: u32, // null terminated string index
@@ -3124,13 +3116,13 @@ pub const Inst = struct {
/// address_space: Ref,
/// }
/// }
- /// 5. flags: u32 // for every 8 fields
+ /// 8. flags: u32 // for every 8 fields
/// - sets of 4 bits:
/// 0b000X: whether corresponding field has an align expression
/// 0b00X0: whether corresponding field has a default expression
/// 0b0X00: whether corresponding field is comptime
/// 0bX000: whether corresponding field has a type expression
- /// 6. fields: { // for every fields_len
+ /// 9. fields: { // for every fields_len
/// field_name: u32,
/// doc_comment: u32, // 0 if no doc comment
/// field_type: Ref, // if corresponding bit is not set. none means anytype.
@@ -3138,7 +3130,7 @@ pub const Inst = struct {
/// align_body_len: u32, // if corresponding bit is set
/// init_body_len: u32, // if corresponding bit is set
/// }
- /// 7. bodies: { // for every fields_len
+ /// 10. bodies: { // for every fields_len
/// field_type_body_inst: Inst, // for each field_type_body_len
/// align_body_inst: Inst, // for each align_body_len
/// init_body_inst: Inst, // for each init_body_len
@@ -3148,11 +3140,12 @@ pub const Inst = struct {
has_src_node: bool,
has_fields_len: bool,
has_decls_len: bool,
+ has_backing_int: bool,
known_non_opv: bool,
known_comptime_only: bool,
name_strategy: NameStrategy,
layout: std.builtin.Type.ContainerLayout,
- _: u7 = undefined,
+ _: u6 = undefined,
};
};
@@ -3619,6 +3612,16 @@ pub fn declIterator(zir: Zir, decl_inst: u32) DeclIterator {
break :decls_len decls_len;
} else 0;
+ if (small.has_backing_int) {
+ const backing_int_body_len = zir.extra[extra_index];
+ extra_index += 1; // backing_int_body_len
+ if (backing_int_body_len == 0) {
+ extra_index += 1; // backing_int_ref
+ } else {
+ extra_index += backing_int_body_len; // backing_int_body_inst
+ }
+ }
+
return declIteratorInner(zir, extra_index, decls_len);
},
.enum_decl => {
@@ -3915,6 +3918,27 @@ pub const FnInfo = struct {
total_params_len: u32,
};
+pub fn getParamBody(zir: Zir, fn_inst: Inst.Index) []const u32 {
+ const tags = zir.instructions.items(.tag);
+ const datas = zir.instructions.items(.data);
+ const inst_data = datas[fn_inst].pl_node;
+
+ const param_block_index = switch (tags[fn_inst]) {
+ .func, .func_inferred => blk: {
+ const extra = zir.extraData(Inst.Func, inst_data.payload_index);
+ break :blk extra.data.param_block;
+ },
+ .func_fancy => blk: {
+ const extra = zir.extraData(Inst.FuncFancy, inst_data.payload_index);
+ break :blk extra.data.param_block;
+ },
+ else => unreachable,
+ };
+
+ const param_block = zir.extraData(Inst.Block, datas[param_block_index].pl_node.payload_index);
+ return zir.extra[param_block.end..][0..param_block.data.body_len];
+}
+
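A possible caller of the new helper (illustrative; `std` is assumed imported and `fn_inst` must index a `func`, `func_inferred`, or `func_fancy` instruction):

fn logParams(zir: Zir, fn_inst: Zir.Inst.Index) void {
    const tags = zir.instructions.items(.tag);
    for (zir.getParamBody(fn_inst)) |param_inst| {
        // Entries are instruction indices such as `param`,
        // `param_comptime`, or `param_anytype`.
        std.log.debug("param instruction: {}", .{tags[param_inst]});
    }
}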
pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo {
const tags = zir.instructions.items(.tag);
const datas = zir.instructions.items(.data);
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index a8bafee4f8..d256f9a558 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -166,10 +166,12 @@ const MCValue = union(enum) {
/// the type is u1) or true (if the type in bool) iff the
/// specified condition is true.
condition_flags: Condition,
+ /// The value is a function argument passed via the stack.
+ stack_argument_offset: u32,
fn isMemory(mcv: MCValue) bool {
return switch (mcv) {
- .memory, .stack_offset => true,
+ .memory, .stack_offset, .stack_argument_offset => true,
else => false,
};
}
@@ -192,6 +194,7 @@ const MCValue = union(enum) {
.condition_flags,
.ptr_stack_offset,
.undef,
+ .stack_argument_offset,
=> false,
.register,
@@ -337,6 +340,7 @@ pub fn generate(
.prev_di_line = module_fn.lbrace_line,
.prev_di_column = module_fn.lbrace_column,
.stack_size = mem.alignForwardGeneric(u32, function.max_end_stack, function.stack_align),
+ .saved_regs_stack_space = function.saved_regs_stack_space,
};
defer emit.deinit();
@@ -414,6 +418,23 @@ fn gen(self: *Self) !void {
// sub sp, sp, #reloc
const backpatch_reloc = try self.addNop();
+ if (self.ret_mcv == .stack_offset) {
+ // The address of where to store the return value is in x0
+ // (or w0 when pointer size is 32 bits). As this register
+ // might get overwritten along the way, save the address
+ // to the stack.
+ const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+ const ptr_bytes = @divExact(ptr_bits, 8);
+ const ret_ptr_reg = registerAlias(.x0, ptr_bytes);
+
+ const stack_offset = mem.alignForwardGeneric(u32, self.next_stack_offset, ptr_bytes) + ptr_bytes;
+ self.next_stack_offset = stack_offset;
+ self.max_end_stack = @maximum(self.max_end_stack, self.next_stack_offset);
+
+ try self.genSetStack(Type.usize, stack_offset, MCValue{ .register = ret_ptr_reg });
+ self.ret_mcv = MCValue{ .stack_offset = stack_offset };
+ }
+
_ = try self.addInst(.{
.tag = .dbg_prologue_end,
.data = .{ .nop = {} },
@@ -540,33 +561,38 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
switch (air_tags[inst]) {
// zig fmt: off
- .add => try self.airBinOp(inst, .add),
- .addwrap => try self.airBinOp(inst, .addwrap),
- .sub => try self.airBinOp(inst, .sub),
- .subwrap => try self.airBinOp(inst, .subwrap),
- .mul => try self.airBinOp(inst, .mul),
- .mulwrap => try self.airBinOp(inst, .mulwrap),
- .shl => try self.airBinOp(inst, .shl),
- .shl_exact => try self.airBinOp(inst, .shl_exact),
- .bool_and => try self.airBinOp(inst, .bool_and),
- .bool_or => try self.airBinOp(inst, .bool_or),
- .bit_and => try self.airBinOp(inst, .bit_and),
- .bit_or => try self.airBinOp(inst, .bit_or),
- .xor => try self.airBinOp(inst, .xor),
- .shr => try self.airBinOp(inst, .shr),
- .shr_exact => try self.airBinOp(inst, .shr_exact),
+ .add => try self.airBinOp(inst, .add),
+ .addwrap => try self.airBinOp(inst, .addwrap),
+ .sub => try self.airBinOp(inst, .sub),
+ .subwrap => try self.airBinOp(inst, .subwrap),
+ .mul => try self.airBinOp(inst, .mul),
+ .mulwrap => try self.airBinOp(inst, .mulwrap),
+ .shl => try self.airBinOp(inst, .shl),
+ .shl_exact => try self.airBinOp(inst, .shl_exact),
+ .bool_and => try self.airBinOp(inst, .bool_and),
+ .bool_or => try self.airBinOp(inst, .bool_or),
+ .bit_and => try self.airBinOp(inst, .bit_and),
+ .bit_or => try self.airBinOp(inst, .bit_or),
+ .xor => try self.airBinOp(inst, .xor),
+ .shr => try self.airBinOp(inst, .shr),
+ .shr_exact => try self.airBinOp(inst, .shr_exact),
+ .div_float => try self.airBinOp(inst, .div_float),
+ .div_trunc => try self.airBinOp(inst, .div_trunc),
+ .div_floor => try self.airBinOp(inst, .div_floor),
+ .div_exact => try self.airBinOp(inst, .div_exact),
+ .rem => try self.airBinOp(inst, .rem),
+ .mod => try self.airBinOp(inst, .mod),
- .ptr_add => try self.airPtrArithmetic(inst, .ptr_add),
- .ptr_sub => try self.airPtrArithmetic(inst, .ptr_sub),
+ .ptr_add => try self.airPtrArithmetic(inst, .ptr_add),
+ .ptr_sub => try self.airPtrArithmetic(inst, .ptr_sub),
+
+ .min => try self.airMin(inst),
+ .max => try self.airMax(inst),
.add_sat => try self.airAddSat(inst),
.sub_sat => try self.airSubSat(inst),
.mul_sat => try self.airMulSat(inst),
- .rem => try self.airRem(inst),
- .mod => try self.airMod(inst),
.shl_sat => try self.airShlSat(inst),
- .min => try self.airMin(inst),
- .max => try self.airMax(inst),
.slice => try self.airSlice(inst),
.sqrt,
@@ -591,8 +617,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.mul_with_overflow => try self.airMulWithOverflow(inst),
.shl_with_overflow => try self.airShlWithOverflow(inst),
- .div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst),
-
.cmp_lt => try self.airCmp(inst, .lt),
.cmp_lte => try self.airCmp(inst, .lte),
.cmp_eq => try self.airCmp(inst, .eq),
@@ -753,6 +777,9 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.float_to_int_optimized,
=> return self.fail("TODO implement optimized float mode", .{}),
+ .is_named_enum_value => return self.fail("TODO implement is_named_enum_value", .{}),
+ .error_set_has_value => return self.fail("TODO implement error_set_has_value", .{}),
+
.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
// zig fmt: on
@@ -1008,17 +1035,43 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
if (self.liveness.isUnused(inst))
return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
- const operand_ty = self.air.typeOf(ty_op.operand);
- const operand = try self.resolveInst(ty_op.operand);
- const info_a = operand_ty.intInfo(self.target.*);
- const info_b = self.air.typeOfIndex(inst).intInfo(self.target.*);
- if (info_a.signedness != info_b.signedness)
- return self.fail("TODO gen intcast sign safety in semantic analysis", .{});
+ const operand = ty_op.operand;
+ const operand_mcv = try self.resolveInst(operand);
+ const operand_ty = self.air.typeOf(operand);
+ const operand_info = operand_ty.intInfo(self.target.*);
- if (info_a.bits == info_b.bits)
- return self.finishAir(inst, operand, .{ ty_op.operand, .none, .none });
+ const dest_ty = self.air.typeOfIndex(inst);
+ const dest_abi_size = dest_ty.abiSize(self.target.*);
+ const dest_info = dest_ty.intInfo(self.target.*);
- return self.fail("TODO implement intCast for {}", .{self.target.cpu.arch});
+ const result: MCValue = result: {
+ const operand_lock: ?RegisterLock = switch (operand_mcv) {
+ .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+ else => null,
+ };
+ defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
+
+ const truncated: MCValue = switch (operand_mcv) {
+ .register => |r| MCValue{ .register = registerAlias(r, dest_abi_size) },
+ else => operand_mcv,
+ };
+
+ if (dest_info.bits > operand_info.bits) {
+ const dest_mcv = try self.allocRegOrMem(inst, true);
+ try self.setRegOrMem(self.air.typeOfIndex(inst), dest_mcv, truncated);
+ break :result dest_mcv;
+ } else {
+ if (self.reuseOperand(inst, operand, 0, truncated)) {
+ break :result truncated;
+ } else {
+ const dest_mcv = try self.allocRegOrMem(inst, true);
+ try self.setRegOrMem(self.air.typeOfIndex(inst), dest_mcv, truncated);
+ break :result dest_mcv;
+ }
+ }
+ };
+
+ return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn truncRegister(
@@ -1044,6 +1097,8 @@ fn truncRegister(
});
},
32, 64 => {
+ assert(dest_reg.size() == operand_reg.size());
+
_ = try self.addInst(.{
.tag = .mov_register,
.data = .{ .rr = .{
@@ -1099,7 +1154,7 @@ fn trunc(
return MCValue{ .register = dest_reg };
} else {
- return self.fail("TODO: truncate to ints > 32 bits", .{});
+ return self.fail("TODO: truncate to ints > 64 bits", .{});
}
}
@@ -1262,6 +1317,9 @@ fn binOpRegister(
const lhs_is_register = lhs == .register;
const rhs_is_register = rhs == .register;
+ if (lhs_is_register) assert(lhs.register == registerAlias(lhs.register, lhs_ty.abiSize(self.target.*)));
+ if (rhs_is_register) assert(rhs.register == registerAlias(rhs.register, rhs_ty.abiSize(self.target.*)));
+
const lhs_lock: ?RegisterLock = if (lhs_is_register)
self.register_manager.lockReg(lhs.register)
else
@@ -1291,13 +1349,22 @@ fn binOpRegister(
const new_lhs_lock = self.register_manager.lockReg(lhs_reg);
defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg);
- const rhs_reg = if (rhs_is_register) rhs.register else blk: {
+ const rhs_reg = if (rhs_is_register)
+ // lhs is almost always equal to rhs, except in shifts. In
+ // order to guarantee that registers will have equal sizes, we
+ // use the register alias of rhs corresponding to the size of
+ // lhs.
+ registerAlias(rhs.register, lhs_ty.abiSize(self.target.*))
+ else blk: {
const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: {
break :inst Air.refToIndex(md.rhs).?;
} else null;
const raw_reg = try self.register_manager.allocReg(track_inst, gp);
- const reg = registerAlias(raw_reg, rhs_ty.abiAlignment(self.target.*));
+
+ // Here, we deliberately use the size of lhs: lhs and rhs
+ // may differ in size in the case of shifts. See comment above.
+ const reg = registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
@@ -1348,6 +1415,8 @@ fn binOpRegister(
.lsl_register,
.asr_register,
.lsr_register,
+ .sdiv,
+ .udiv,
=> .{ .rrr = .{
.rd = dest_reg,
.rn = lhs_reg,
@@ -1404,6 +1473,8 @@ fn binOpImmediate(
) !MCValue {
const lhs_is_register = lhs == .register;
+ if (lhs_is_register) assert(lhs.register == registerAlias(lhs.register, lhs_ty.abiSize(self.target.*)));
+
const lhs_lock: ?RegisterLock = if (lhs_is_register)
self.register_manager.lockReg(lhs.register)
else
@@ -1586,6 +1657,151 @@ fn binOp(
else => unreachable,
}
},
+ .div_float => {
+ switch (lhs_ty.zigTypeTag()) {
+ .Float => return self.fail("TODO div_float", .{}),
+ .Vector => return self.fail("TODO div_float on vectors", .{}),
+ else => unreachable,
+ }
+ },
+ .div_trunc, .div_floor, .div_exact => {
+ switch (lhs_ty.zigTypeTag()) {
+ .Float => return self.fail("TODO div on floats", .{}),
+ .Vector => return self.fail("TODO div on vectors", .{}),
+ .Int => {
+ assert(lhs_ty.eql(rhs_ty, mod));
+ const int_info = lhs_ty.intInfo(self.target.*);
+ if (int_info.bits <= 64) {
+ switch (int_info.signedness) {
+ .signed => {
+ switch (tag) {
+ .div_trunc, .div_exact => {
+ // TODO optimize integer division by constants
+ return try self.binOpRegister(.sdiv, lhs, rhs, lhs_ty, rhs_ty, metadata);
+ },
+ .div_floor => return self.fail("TODO div_floor on signed integers", .{}),
+ else => unreachable,
+ }
+ },
+ .unsigned => {
+ // TODO optimize integer division by constants
+ return try self.binOpRegister(.udiv, lhs, rhs, lhs_ty, rhs_ty, metadata);
+ },
+ }
+ } else {
+ return self.fail("TODO integer division for ints with bits > 64", .{});
+ }
+ },
+ else => unreachable,
+ }
+ },
+ .rem, .mod => {
+ switch (lhs_ty.zigTypeTag()) {
+ .Float => return self.fail("TODO rem/mod on floats", .{}),
+ .Vector => return self.fail("TODO rem/mod on vectors", .{}),
+ .Int => {
+ assert(lhs_ty.eql(rhs_ty, mod));
+ const int_info = lhs_ty.intInfo(self.target.*);
+ if (int_info.bits <= 64) {
+ if (int_info.signedness == .signed and tag == .mod) {
+ return self.fail("TODO mod on signed integers", .{});
+ } else {
+ const lhs_is_register = lhs == .register;
+ const rhs_is_register = rhs == .register;
+
+ const lhs_lock: ?RegisterLock = if (lhs_is_register)
+ self.register_manager.lockReg(lhs.register)
+ else
+ null;
+ defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg);
+
+ const rhs_lock: ?RegisterLock = if (rhs_is_register)
+ self.register_manager.lockReg(rhs.register)
+ else
+ null;
+ defer if (rhs_lock) |reg| self.register_manager.unlockReg(reg);
+
+ const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
+
+ const lhs_reg = if (lhs_is_register) lhs.register else blk: {
+ const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: {
+ break :inst Air.refToIndex(md.lhs).?;
+ } else null;
+
+ const raw_reg = try self.register_manager.allocReg(track_inst, gp);
+ const reg = registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
+
+ if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
+
+ break :blk reg;
+ };
+ const new_lhs_lock = self.register_manager.lockReg(lhs_reg);
+ defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg);
+
+ const rhs_reg = if (rhs_is_register) rhs.register else blk: {
+ const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: {
+ break :inst Air.refToIndex(md.rhs).?;
+ } else null;
+
+ const raw_reg = try self.register_manager.allocReg(track_inst, gp);
+ const reg = registerAlias(raw_reg, rhs_ty.abiSize(self.target.*));
+
+ if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
+
+ break :blk reg;
+ };
+ const new_rhs_lock = self.register_manager.lockReg(rhs_reg);
+ defer if (new_rhs_lock) |reg| self.register_manager.unlockReg(reg);
+
+ const dest_regs: [2]Register = blk: {
+ const raw_regs = try self.register_manager.allocRegs(2, .{ null, null }, gp);
+ const abi_size = lhs_ty.abiSize(self.target.*);
+ break :blk .{
+ registerAlias(raw_regs[0], abi_size),
+ registerAlias(raw_regs[1], abi_size),
+ };
+ };
+ const dest_regs_locks = self.register_manager.lockRegsAssumeUnused(2, dest_regs);
+ defer for (dest_regs_locks) |reg| {
+ self.register_manager.unlockReg(reg);
+ };
+ const quotient_reg = dest_regs[0];
+ const remainder_reg = dest_regs[1];
+
+ if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs);
+ if (!rhs_is_register) try self.genSetReg(rhs_ty, rhs_reg, rhs);
+
+ _ = try self.addInst(.{
+ .tag = switch (int_info.signedness) {
+ .signed => .sdiv,
+ .unsigned => .udiv,
+ },
+ .data = .{ .rrr = .{
+ .rd = quotient_reg,
+ .rn = lhs_reg,
+ .rm = rhs_reg,
+ } },
+ });
+
+ _ = try self.addInst(.{
+ .tag = .msub,
+ .data = .{ .rrrr = .{
+ .rd = remainder_reg,
+ .rn = quotient_reg,
+ .rm = rhs_reg,
+ .ra = lhs_reg,
+ } },
+ });
+
+ return MCValue{ .register = remainder_reg };
+ }
+ } else {
+ return self.fail("TODO rem/mod for integers with bits > 64", .{});
+ }
+ },
+ else => unreachable,
+ }
+ },
.addwrap,
.subwrap,
.mulwrap,
@@ -1869,7 +2085,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
try self.truncRegister(dest_reg, truncated_reg, int_info.signedness, int_info.bits);
// cmp dest, truncated
- _ = try self.binOp(.cmp_eq, dest, .{ .register = truncated_reg }, Type.usize, Type.usize, null);
+ _ = try self.binOp(.cmp_eq, dest, .{ .register = truncated_reg }, lhs_ty, lhs_ty, null);
try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg });
try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .condition_flags = .ne });
@@ -2257,24 +2473,6 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
-fn airDiv(self: *Self, inst: Air.Inst.Index) !void {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement div for {}", .{self.target.cpu.arch});
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
-}
-
-fn airRem(self: *Self, inst: Air.Inst.Index) !void {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement rem for {}", .{self.target.cpu.arch});
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
-}
-
-fn airMod(self: *Self, inst: Air.Inst.Index) !void {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement mod for {}", .{self.target.cpu.arch});
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
-}
-
fn airShlSat(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement shl_sat for {}", .{self.target.cpu.arch});
@@ -2313,6 +2511,9 @@ fn errUnionErr(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCV
const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, self.target.*));
switch (error_union_mcv) {
.register => return self.fail("TODO errUnionErr for registers", .{}),
+ .stack_argument_offset => |off| {
+ return MCValue{ .stack_argument_offset = off + err_offset };
+ },
.stack_offset => |off| {
return MCValue{ .stack_offset = off - err_offset };
},
@@ -2347,6 +2548,9 @@ fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type)
const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, self.target.*));
switch (error_union_mcv) {
.register => return self.fail("TODO errUnionPayload for registers", .{}),
+ .stack_argument_offset => |off| {
+ return MCValue{ .stack_argument_offset = off + payload_offset };
+ },
.stack_offset => |off| {
return MCValue{ .stack_offset = off - payload_offset };
},
@@ -2436,21 +2640,28 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
+fn slicePtr(mcv: MCValue) MCValue {
+ switch (mcv) {
+ .dead, .unreach, .none => unreachable,
+ .register => unreachable, // a slice doesn't fit in one register
+ .stack_argument_offset => |off| {
+ return MCValue{ .stack_argument_offset = off };
+ },
+ .stack_offset => |off| {
+ return MCValue{ .stack_offset = off };
+ },
+ .memory => |addr| {
+ return MCValue{ .memory = addr };
+ },
+ else => unreachable, // invalid MCValue for a slice
+ }
+}
+
fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const mcv = try self.resolveInst(ty_op.operand);
- switch (mcv) {
- .dead, .unreach, .none => unreachable,
- .register => unreachable, // a slice doesn't fit in one register
- .stack_offset => |off| {
- break :result MCValue{ .stack_offset = off };
- },
- .memory => |addr| {
- break :result MCValue{ .memory = addr };
- },
- else => return self.fail("TODO implement slice_len for {}", .{mcv}),
- }
+ break :result slicePtr(mcv);
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
@@ -2464,6 +2675,9 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
switch (mcv) {
.dead, .unreach, .none => unreachable,
.register => unreachable, // a slice doesn't fit in one register
+ .stack_argument_offset => |off| {
+ break :result MCValue{ .stack_argument_offset = off + ptr_bytes };
+ },
.stack_offset => |off| {
break :result MCValue{ .stack_offset = off - ptr_bytes };
},
@@ -2514,6 +2728,9 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
if (!is_volatile and self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
const result: MCValue = result: {
+ const slice_ty = self.air.typeOf(bin_op.lhs);
+ const elem_ty = slice_ty.childType();
+ const elem_size = elem_ty.abiSize(self.target.*);
const slice_mcv = try self.resolveInst(bin_op.lhs);
// TODO optimize for the case where the index is a constant,
@@ -2521,10 +2738,6 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
const index_mcv = try self.resolveInst(bin_op.rhs);
const index_is_register = index_mcv == .register;
- const slice_ty = self.air.typeOf(bin_op.lhs);
- const elem_ty = slice_ty.childType();
- const elem_size = elem_ty.abiSize(self.target.*);
-
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf);
@@ -2534,15 +2747,17 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
null;
defer if (index_lock) |reg| self.register_manager.unlockReg(reg);
- const base_mcv: MCValue = switch (slice_mcv) {
- .stack_offset => |off| .{ .register = try self.copyToTmpRegister(slice_ptr_field_type, .{ .stack_offset = off }) },
- else => return self.fail("TODO slice_elem_val when slice is {}", .{slice_mcv}),
- };
- const base_lock = self.register_manager.lockRegAssumeUnused(base_mcv.register);
- defer self.register_manager.unlockReg(base_lock);
+ const base_mcv = slicePtr(slice_mcv);
switch (elem_size) {
else => {
+ const base_reg = switch (base_mcv) {
+ .register => |r| r,
+ else => try self.copyToTmpRegister(slice_ptr_field_type, base_mcv),
+ };
+ const base_reg_lock = self.register_manager.lockRegAssumeUnused(base_reg);
+ defer self.register_manager.unlockReg(base_reg_lock);
+
const dest = try self.allocRegOrMem(inst, true);
const addr = try self.binOp(.ptr_add, base_mcv, index_mcv, slice_ptr_field_type, Type.usize, null);
try self.load(dest, addr, slice_ptr_field_type);
@@ -2557,7 +2772,16 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement slice_elem_ptr for {}", .{self.target.cpu.arch});
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const slice_mcv = try self.resolveInst(extra.lhs);
+ const index_mcv = try self.resolveInst(extra.rhs);
+ const base_mcv = slicePtr(slice_mcv);
+
+ const slice_ty = self.air.typeOf(extra.lhs);
+
+ const addr = try self.binOp(.ptr_add, base_mcv, index_mcv, slice_ty, Type.usize, null);
+ break :result addr;
+ };
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
@@ -2577,7 +2801,15 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement ptr_elem_ptr for {}", .{self.target.cpu.arch});
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const ptr_mcv = try self.resolveInst(extra.lhs);
+ const index_mcv = try self.resolveInst(extra.rhs);
+
+ const ptr_ty = self.air.typeOf(extra.lhs);
+
+ const addr = try self.binOp(.ptr_add, ptr_mcv, index_mcv, ptr_ty, Type.usize, null);
+ break :result addr;
+ };
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
@@ -2726,6 +2958,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
},
.memory,
.stack_offset,
+ .stack_argument_offset,
.got_load,
.direct_load,
=> {
@@ -2907,6 +3140,8 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
defer if (addr_reg_lock) |reg| self.register_manager.unlockReg(reg);
switch (value) {
+ .dead => unreachable,
+ .undef => unreachable,
.register => |value_reg| {
try self.genStrRegister(value_reg, addr_reg, value_ty);
},
@@ -2920,13 +3155,48 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
try self.genSetReg(value_ty, tmp_reg, value);
try self.store(ptr, .{ .register = tmp_reg }, ptr_ty, value_ty);
} else {
- return self.fail("TODO implement memcpy", .{});
+ const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null }, gp);
+ const regs_locks = self.register_manager.lockRegsAssumeUnused(4, regs);
+ defer for (regs_locks) |reg| {
+ self.register_manager.unlockReg(reg);
+ };
+
+ const src_reg = regs[0];
+ const dst_reg = addr_reg;
+ const len_reg = regs[1];
+ const count_reg = regs[2];
+ const tmp_reg = regs[3];
+
+ switch (value) {
+ .stack_offset => |off| {
+ // sub src_reg, fp, #off
+ try self.genSetReg(ptr_ty, src_reg, .{ .ptr_stack_offset = off });
+ },
+ .memory => |addr| try self.genSetReg(Type.usize, src_reg, .{ .immediate = addr }),
+ .stack_argument_offset => |off| {
+ _ = try self.addInst(.{
+ .tag = .ldr_ptr_stack_argument,
+ .data = .{ .load_store_stack = .{
+ .rt = src_reg,
+ .offset = off,
+ } },
+ });
+ },
+ else => return self.fail("TODO store {} to register", .{value}),
+ }
+
+ // mov len, #abi_size
+ try self.genSetReg(Type.usize, len_reg, .{ .immediate = abi_size });
+
+ // memcpy(src, dst, len)
+ try self.genInlineMemcpy(src_reg, dst_reg, len_reg, count_reg, tmp_reg);
}
},
}
},
.memory,
.stack_offset,
+ .stack_argument_offset,
.got_load,
.direct_load,
=> {
@@ -3005,10 +3275,14 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const mcv = try self.resolveInst(operand);
const struct_ty = self.air.typeOf(operand);
+ const struct_field_ty = struct_ty.structFieldType(index);
const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
switch (mcv) {
.dead, .unreach => unreachable,
+ .stack_argument_offset => |off| {
+ break :result MCValue{ .stack_argument_offset = off + struct_field_offset };
+ },
.stack_offset => |off| {
break :result MCValue{ .stack_offset = off - struct_field_offset };
},
@@ -3016,29 +3290,28 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
break :result MCValue{ .memory = addr + struct_field_offset };
},
.register_with_overflow => |rwo| {
- switch (index) {
- 0 => {
- // get wrapped value: return register
- break :result MCValue{ .register = rwo.reg };
- },
- 1 => {
- // TODO return special MCValue condition flags
- // get overflow bit: set register to C flag
- // resp. V flag
- const raw_dest_reg = try self.register_manager.allocReg(null, gp);
- const dest_reg = raw_dest_reg.to32();
+ const reg_lock = self.register_manager.lockRegAssumeUnused(rwo.reg);
+ defer self.register_manager.unlockReg(reg_lock);
- _ = try self.addInst(.{
- .tag = .cset,
- .data = .{ .r_cond = .{
- .rd = dest_reg,
- .cond = rwo.flag,
- } },
- });
+ const field: MCValue = switch (index) {
+ // get wrapped value: return register
+ 0 => MCValue{ .register = rwo.reg },
+
+ // get overflow bit: return C or V flag
+ 1 => MCValue{ .condition_flags = rwo.flag },
- break :result MCValue{ .register = dest_reg };
- },
else => unreachable,
+ };
+
+ if (self.reuseOperand(inst, operand, 0, field)) {
+ break :result field;
+ } else {
+ // Copy to new register
+ const raw_dest_reg = try self.register_manager.allocReg(null, gp);
+ const dest_reg = registerAlias(raw_dest_reg, struct_field_ty.abiSize(self.target.*));
+ try self.genSetReg(struct_field_ty, dest_reg, field);
+
+ break :result MCValue{ .register = dest_reg };
}
},
else => return self.fail("TODO implement codegen struct_field_val for {}", .{mcv}),
@@ -3143,6 +3416,31 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
// saving compare flags may require a new caller-saved register
try self.spillCompareFlagsIfOccupied();
+ if (info.return_value == .stack_offset) {
+ log.debug("airCall: return by reference", .{});
+ const ret_ty = fn_ty.fnReturnType();
+ const ret_abi_size = @intCast(u32, ret_ty.abiSize(self.target.*));
+ const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(self.target.*));
+ const stack_offset = try self.allocMem(inst, ret_abi_size, ret_abi_align);
+
+ const ptr_bits = self.target.cpu.arch.ptrBitWidth();
+ const ptr_bytes = @divExact(ptr_bits, 8);
+ const ret_ptr_reg = registerAlias(.x0, ptr_bytes);
+
+ var ptr_ty_payload: Type.Payload.ElemType = .{
+ .base = .{ .tag = .single_mut_pointer },
+ .data = ret_ty,
+ };
+ const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ try self.register_manager.getReg(ret_ptr_reg, null);
+ try self.genSetReg(ptr_ty, ret_ptr_reg, .{ .ptr_stack_offset = stack_offset });
+
+ info.return_value = .{ .stack_offset = stack_offset };
+ }
+
+ // Make space for the arguments passed via the stack
+ self.max_end_stack += info.stack_byte_count;
+
for (info.args) |mc_arg, arg_i| {
const arg = args[arg_i];
const arg_ty = self.air.typeOf(arg);
@@ -3154,12 +3452,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
try self.register_manager.getReg(reg, null);
try self.genSetReg(arg_ty, reg, arg_mcv);
},
- .stack_offset => {
- return self.fail("TODO implement calling with parameters in memory", .{});
- },
- .ptr_stack_offset => {
- return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{});
- },
+ .stack_offset => unreachable,
+ .stack_argument_offset => |offset| try self.genSetStackArgument(
+ arg_ty,
+ offset,
+ arg_mcv,
+ ),
else => unreachable,
}
}
@@ -3303,8 +3601,15 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
},
.stack_offset => {
// Return result by reference
- // TODO
- return self.fail("TODO implement airRet for {}", .{self.ret_mcv});
+ //
+ // self.ret_mcv holds the address where this function
+ // should store its result
+ var ptr_ty_payload: Type.Payload.ElemType = .{
+ .base = .{ .tag = .single_mut_pointer },
+ .data = ret_ty,
+ };
+ const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ try self.store(self.ret_mcv, operand, ptr_ty, ret_ty);
},
else => unreachable,
}
@@ -3330,10 +3635,34 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
},
.stack_offset => {
// Return result by reference
- // TODO
- return self.fail("TODO implement airRetLoad for {}", .{self.ret_mcv});
+ //
+ // self.ret_mcv holds the address where this function
+ // should store its result
+ //
+ // If the operand is a ret_ptr instruction, we are done
+ // here. Else we need to load the result from the location
+ // pointed to by the operand and store it to the result
+ // location.
+ const op_inst = Air.refToIndex(un_op).?;
+ if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) {
+ const abi_size = @intCast(u32, ret_ty.abiSize(self.target.*));
+ const abi_align = ret_ty.abiAlignment(self.target.*);
+
+ // This is essentially allocMem without the
+ // instruction tracking
+ if (abi_align > self.stack_align)
+ self.stack_align = abi_align;
+ // TODO find a free slot instead of always appending
+ const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align) + abi_size;
+ self.next_stack_offset = offset;
+ self.max_end_stack = @maximum(self.max_end_stack, self.next_stack_offset);
+
+ const tmp_mcv = MCValue{ .stack_offset = offset };
+ try self.load(tmp_mcv, ptr, ptr_ty);
+ try self.store(self.ret_mcv, tmp_mcv, ptr_ty, ret_ty);
+ }
},
- else => unreachable,
+ else => unreachable, // invalid return result
}
try self.exitlude_jump_relocs.append(self.gpa, try self.addNop());
@@ -3635,40 +3964,14 @@ fn isNonNull(self: *Self, operand: MCValue) !MCValue {
fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue {
const error_type = ty.errorUnionSet();
- const payload_type = ty.errorUnionPayload();
+ const error_int_type = Type.initTag(.u16);
if (error_type.errorSetIsEmpty()) {
return MCValue{ .immediate = 0 }; // always false
}
- const err_off = errUnionErrorOffset(payload_type, self.target.*);
- switch (operand) {
- .stack_offset => |off| {
- const offset = off - @intCast(u32, err_off);
- const tmp_reg = try self.copyToTmpRegister(Type.anyerror, .{ .stack_offset = offset });
- _ = try self.addInst(.{
- .tag = .cmp_immediate,
- .data = .{ .r_imm12_sh = .{
- .rn = tmp_reg,
- .imm12 = 0,
- } },
- });
- },
- .register => |reg| {
- if (err_off > 0 or payload_type.hasRuntimeBitsIgnoreComptime()) {
- return self.fail("TODO implement isErr for register operand with payload bits", .{});
- }
- _ = try self.addInst(.{
- .tag = .cmp_immediate,
- .data = .{ .r_imm12_sh = .{
- .rn = reg,
- .imm12 = 0,
- } },
- });
- },
- else => return self.fail("TODO implement isErr for {}", .{operand}),
- }
-
+ const error_mcv = try self.errUnionErr(operand, ty);
+ _ = try self.binOp(.cmp_eq, error_mcv, .{ .immediate = 0 }, error_int_type, error_int_type, null);
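+ // Error tags are nonzero integers, so after the compare with 0,
+ // "unsigned higher" (.hi) is set exactly when an error is present.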
return MCValue{ .condition_flags = .hi };
}
@@ -3886,7 +4189,7 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
block_data.mcv = switch (operand_mcv) {
.none, .dead, .unreach => unreachable,
.register, .stack_offset, .memory => operand_mcv,
- .immediate, .condition_flags => blk: {
+ .immediate, .stack_argument_offset, .condition_flags => blk: {
const new_mcv = try self.allocRegOrMem(block, true);
try self.setRegOrMem(self.air.typeOfIndex(block), new_mcv, operand_mcv);
break :blk new_mcv;
@@ -4128,6 +4431,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
.got_load,
.direct_load,
.memory,
+ .stack_argument_offset,
.stack_offset,
=> {
switch (mcv) {
@@ -4166,6 +4470,15 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
// sub src_reg, fp, #off
try self.genSetReg(ptr_ty, src_reg, .{ .ptr_stack_offset = off });
},
+ .stack_argument_offset => |off| {
+ _ = try self.addInst(.{
+ .tag = .ldr_ptr_stack_argument,
+ .data = .{ .load_store_stack = .{
+ .rt = src_reg,
+ .offset = off,
+ } },
+ });
+ },
.memory => |addr| try self.genSetReg(Type.usize, src_reg, .{ .immediate = addr }),
.got_load,
.direct_load,
@@ -4269,6 +4582,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
}
},
.register => |src_reg| {
+ assert(src_reg.size() == reg.size());
+
// If the registers are the same, nothing to do.
if (src_reg.id() == reg.id())
return;
@@ -4330,6 +4645,196 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
else => unreachable,
}
},
+ .stack_argument_offset => |off| {
+ const abi_size = ty.abiSize(self.target.*);
+
+ switch (abi_size) {
+ 1, 2, 4, 8 => {
+ const tag: Mir.Inst.Tag = switch (abi_size) {
+ 1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument,
+ 2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument,
+ 4, 8 => .ldr_stack_argument,
+ else => unreachable, // unexpected abi size
+ };
+
+ _ = try self.addInst(.{
+ .tag = tag,
+ .data = .{ .load_store_stack = .{
+ .rt = reg,
+ .offset = @intCast(u32, off),
+ } },
+ });
+ },
+ 3, 5, 6, 7 => return self.fail("TODO implement genSetReg for types of size {}", .{abi_size}),
+ else => unreachable,
+ }
+ },
+ }
+}
+
+fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
+ const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+ switch (mcv) {
+ .dead => unreachable,
+ .none, .unreach => return,
+ .undef => {
+ if (!self.wantSafety())
+ return; // The already existing value will do just fine.
+ // TODO Upgrade this to a memset call when we have that available.
+ switch (ty.abiSize(self.target.*)) {
+ 1 => return self.genSetStackArgument(ty, stack_offset, .{ .immediate = 0xaa }),
+ 2 => return self.genSetStackArgument(ty, stack_offset, .{ .immediate = 0xaaaa }),
+ 4 => return self.genSetStackArgument(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }),
+ 8 => return self.genSetStackArgument(ty, stack_offset, .{ .immediate = 0xaaaaaaaaaaaaaaaa }),
+ else => return self.fail("TODO implement memset", .{}),
+ }
+ },
+ .register => |reg| {
+ switch (abi_size) {
+ 1, 2, 4, 8 => {
+ const tag: Mir.Inst.Tag = switch (abi_size) {
+ 1 => .strb_immediate,
+ 2 => .strh_immediate,
+ 4, 8 => .str_immediate,
+ else => unreachable, // unexpected abi size
+ };
+ const rt = registerAlias(reg, abi_size);
+ const offset = switch (abi_size) {
+ 1 => blk: {
+ if (math.cast(u12, stack_offset)) |imm| {
+ break :blk Instruction.LoadStoreOffset.imm(imm);
+ } else {
+ return self.fail("TODO genSetStackArgument byte with larger offset", .{});
+ }
+ },
+ 2 => blk: {
+ assert(std.mem.isAlignedGeneric(u32, stack_offset, 2)); // misaligned stack entry
+ if (math.cast(u12, @divExact(stack_offset, 2))) |imm| {
+ break :blk Instruction.LoadStoreOffset.imm(imm);
+ } else {
+ return self.fail("TODO getSetStackArgument halfword with larger offset", .{});
+ }
+ },
+ 4, 8 => blk: {
+ const alignment = abi_size;
+ assert(std.mem.isAlignedGeneric(u32, stack_offset, alignment)); // misaligned stack entry
+ if (math.cast(u12, @divExact(stack_offset, alignment))) |imm| {
+ break :blk Instruction.LoadStoreOffset.imm(imm);
+ } else {
+ return self.fail("TODO genSetStackArgument with larger offset", .{});
+ }
+ },
+ else => unreachable,
+ };
+
+ _ = try self.addInst(.{
+ .tag = tag,
+ .data = .{ .load_store_register_immediate = .{
+ .rt = rt,
+ .rn = .sp,
+ .offset = offset.immediate,
+ } },
+ });
+ },
+ else => return self.fail("TODO genSetStackArgument other types abi_size={}", .{abi_size}),
+ }
+ },
+ .register_with_overflow => {
+ return self.fail("TODO implement genSetStackArgument {}", .{mcv});
+ },
+ .got_load,
+ .direct_load,
+ .memory,
+ .stack_argument_offset,
+ .stack_offset,
+ => {
+ if (abi_size <= 4) {
+ const reg = try self.copyToTmpRegister(ty, mcv);
+ return self.genSetStackArgument(ty, stack_offset, MCValue{ .register = reg });
+ } else {
+ var ptr_ty_payload: Type.Payload.ElemType = .{
+ .base = .{ .tag = .single_mut_pointer },
+ .data = ty,
+ };
+ const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+
+ // TODO call extern memcpy
+ const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp);
+ const regs_locks = self.register_manager.lockRegsAssumeUnused(5, regs);
+ defer for (regs_locks) |reg| {
+ self.register_manager.unlockReg(reg);
+ };
+
+ const src_reg = regs[0];
+ const dst_reg = regs[1];
+ const len_reg = regs[2];
+ const count_reg = regs[3];
+ const tmp_reg = regs[4];
+
+ switch (mcv) {
+ .stack_offset => |off| {
+ // sub src_reg, fp, #off
+ try self.genSetReg(ptr_ty, src_reg, .{ .ptr_stack_offset = off });
+ },
+ .stack_argument_offset => |off| {
+ _ = try self.addInst(.{
+ .tag = .ldr_ptr_stack_argument,
+ .data = .{ .load_store_stack = .{
+ .rt = src_reg,
+ .offset = off,
+ } },
+ });
+ },
+ .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = addr }),
+ .got_load,
+ .direct_load,
+ => |sym_index| {
+ const tag: Mir.Inst.Tag = switch (mcv) {
+ .got_load => .load_memory_ptr_got,
+ .direct_load => .load_memory_ptr_direct,
+ else => unreachable,
+ };
+ const mod = self.bin_file.options.module.?;
+ _ = try self.addInst(.{
+ .tag = tag,
+ .data = .{
+ .payload = try self.addExtra(Mir.LoadMemoryPie{
+ .register = @enumToInt(src_reg),
+ .atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.sym_index,
+ .sym_index = sym_index,
+ }),
+ },
+ });
+ },
+ else => unreachable,
+ }
+
+ // add dst_reg, sp, #stack_offset
+ _ = try self.addInst(.{
+ .tag = .add_immediate,
+ .data = .{ .rr_imm12_sh = .{
+ .rd = dst_reg,
+ .rn = .sp,
+ .imm12 = math.cast(u12, stack_offset) orelse {
+ return self.fail("TODO load: set reg to stack offset with all possible offsets", .{});
+ },
+ } },
+ });
+
+ // mov len, #abi_size
+ try self.genSetReg(Type.usize, len_reg, .{ .immediate = abi_size });
+
+ // memcpy(src, dst, len)
+ try self.genInlineMemcpy(src_reg, dst_reg, len_reg, count_reg, tmp_reg);
+ }
+ },
+ .condition_flags,
+ .immediate,
+ .ptr_stack_offset,
+ => {
+ const reg = try self.copyToTmpRegister(ty, mcv);
+ return self.genSetStackArgument(ty, stack_offset, MCValue{ .register = reg });
+ },
}
}
@@ -4799,11 +5304,27 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
result.stack_align = 1;
return result;
},
- .Unspecified, .C => {
+ .C => {
// ARM64 Procedure Call Standard
var ncrn: usize = 0; // Next Core Register Number
var nsaa: u32 = 0; // Next stacked argument address
+ if (ret_ty.zigTypeTag() == .NoReturn) {
+ result.return_value = .{ .unreach = {} };
+ } else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) {
+ result.return_value = .{ .none = {} };
+ } else {
+ const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
+ if (ret_ty_size == 0) {
+ assert(ret_ty.isError());
+ result.return_value = .{ .immediate = 0 };
+ } else if (ret_ty_size <= 8) {
+ result.return_value = .{ .register = registerAlias(c_abi_int_return_regs[0], ret_ty_size) };
+ } else {
+ return self.fail("TODO support more return types for ARM backend", .{});
+ }
+ }
+
for (param_types) |ty, i| {
const param_size = @intCast(u32, ty.abiSize(self.target.*));
if (param_size == 0) {
@@ -4837,7 +5358,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
}
}
- result.args[i] = .{ .stack_offset = nsaa };
+ result.args[i] = .{ .stack_argument_offset = nsaa };
nsaa += param_size;
}
}
@@ -4845,28 +5366,49 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
result.stack_byte_count = nsaa;
result.stack_align = 16;
},
+ .Unspecified => {
+ if (ret_ty.zigTypeTag() == .NoReturn) {
+ result.return_value = .{ .unreach = {} };
+ } else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) {
+ result.return_value = .{ .none = {} };
+ } else {
+ const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
+ if (ret_ty_size == 0) {
+ assert(ret_ty.isError());
+ result.return_value = .{ .immediate = 0 };
+ } else if (ret_ty_size <= 8) {
+ result.return_value = .{ .register = registerAlias(.x0, ret_ty_size) };
+ } else {
+ // The result is returned by reference, not by
+ // value: x0 (or w0 when the pointer size is
+ // 32 bits) will contain the address where this
+ // function should write its result.
+ result.return_value = .{ .stack_offset = 0 };
+ }
+ }
+
+ var stack_offset: u32 = 0;
+
+ for (param_types) |ty, i| {
+ if (ty.abiSize(self.target.*) > 0) {
+ const param_size = @intCast(u32, ty.abiSize(self.target.*));
+ const param_alignment = ty.abiAlignment(self.target.*);
+
+ stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment);
+ result.args[i] = .{ .stack_argument_offset = stack_offset };
+ stack_offset += param_size;
+ } else {
+ result.args[i] = .{ .none = {} };
+ }
+ }
+
+ result.stack_byte_count = stack_offset;
+ result.stack_align = 16;
+ },
else => return self.fail("TODO implement function parameters for {} on aarch64", .{cc}),
}
- if (ret_ty.zigTypeTag() == .NoReturn) {
- result.return_value = .{ .unreach = {} };
- } else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) {
- result.return_value = .{ .none = {} };
- } else switch (cc) {
- .Naked => unreachable,
- .Unspecified, .C => {
- const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
- if (ret_ty_size == 0) {
- assert(ret_ty.isError());
- result.return_value = .{ .immediate = 0 };
- } else if (ret_ty_size <= 8) {
- result.return_value = .{ .register = registerAlias(c_abi_int_return_regs[0], ret_ty_size) };
- } else {
- return self.fail("TODO support more return types for ARM backend", .{});
- }
- },
- else => return self.fail("TODO implement function return values for {}", .{cc}),
- }
return result;
}
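Illustrative of the Unspecified convention above (made-up types, not from the patch): a result wider than 8 bytes is returned by reference, so the caller reserves a stack slot and passes its address in x0.

const Big = struct { a: u64, b: u64, c: u64 }; // 24 bytes > 8

fn makeBig() Big {
    // With the changes above, the store goes through the incoming
    // result pointer saved in the prologue (see `gen` and `airRet`).
    return .{ .a = 1, .b = 2, .c = 3 };
}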
diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig
index 47a0c08893..00a2ff380a 100644
--- a/src/arch/aarch64/Emit.zig
+++ b/src/arch/aarch64/Emit.zig
@@ -27,14 +27,21 @@ code: *std.ArrayList(u8),
prev_di_line: u32,
prev_di_column: u32,
+
/// Relative to the beginning of `code`.
prev_di_pc: usize,
+/// The amount of stack space, in bytes, consumed by the saved
+/// callee-saved registers
+saved_regs_stack_space: u32,
+
/// The branch type of every branch
branch_types: std.AutoHashMapUnmanaged(Mir.Inst.Index, BranchType) = .{},
+
/// For every forward branch, maps the target instruction to a list of
/// branches which branch to this target instruction
branch_forward_origins: std.AutoHashMapUnmanaged(Mir.Inst.Index, std.ArrayListUnmanaged(Mir.Inst.Index)) = .{},
+
/// For backward branches: stores the code offset of the target
/// instruction
///
@@ -42,6 +49,8 @@ branch_forward_origins: std.AutoHashMapUnmanaged(Mir.Inst.Index, std.ArrayListUn
/// instruction
code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .{},
+/// The final stack frame size of the function (already aligned to the
+/// respective stack alignment). Does not include prologue stack space.
stack_size: u32,
const InnerError = error{
@@ -82,9 +91,11 @@ pub fn emitMir(
.sub_immediate => try emit.mirAddSubtractImmediate(inst),
.subs_immediate => try emit.mirAddSubtractImmediate(inst),
- .asr_register => try emit.mirShiftRegister(inst),
- .lsl_register => try emit.mirShiftRegister(inst),
- .lsr_register => try emit.mirShiftRegister(inst),
+ .asr_register => try emit.mirDataProcessing2Source(inst),
+ .lsl_register => try emit.mirDataProcessing2Source(inst),
+ .lsr_register => try emit.mirDataProcessing2Source(inst),
+ .sdiv => try emit.mirDataProcessing2Source(inst),
+ .udiv => try emit.mirDataProcessing2Source(inst),
.asr_immediate => try emit.mirShiftImmediate(inst),
.lsl_immediate => try emit.mirShiftImmediate(inst),
@@ -148,6 +159,13 @@ pub fn emitMir(
.strb_stack => try emit.mirLoadStoreStack(inst),
.strh_stack => try emit.mirLoadStoreStack(inst),
+ .ldr_stack_argument => try emit.mirLoadStackArgument(inst),
+ .ldr_ptr_stack_argument => try emit.mirLoadStackArgument(inst),
+ .ldrb_stack_argument => try emit.mirLoadStackArgument(inst),
+ .ldrh_stack_argument => try emit.mirLoadStackArgument(inst),
+ .ldrsb_stack_argument => try emit.mirLoadStackArgument(inst),
+ .ldrsh_stack_argument => try emit.mirLoadStackArgument(inst),
+
.ldr_register => try emit.mirLoadStoreRegisterRegister(inst),
.ldrb_register => try emit.mirLoadStoreRegisterRegister(inst),
.ldrh_register => try emit.mirLoadStoreRegisterRegister(inst),
@@ -172,6 +190,7 @@ pub fn emitMir(
.movk => try emit.mirMoveWideImmediate(inst),
.movz => try emit.mirMoveWideImmediate(inst),
+ .msub => try emit.mirDataProcessing3Source(inst),
.mul => try emit.mirDataProcessing3Source(inst),
.smulh => try emit.mirDataProcessing3Source(inst),
.smull => try emit.mirDataProcessing3Source(inst),
@@ -258,7 +277,7 @@ fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize {
=> return 2 * 4,
.pop_regs, .push_regs => {
const reg_list = emit.mir.instructions.items(.data)[inst].reg_list;
- const number_of_regs = @popCount(u32, reg_list);
+ const number_of_regs = @popCount(reg_list);
const number_of_insts = std.math.divCeil(u6, number_of_regs, 2) catch unreachable;
return number_of_insts * 4;
},
@@ -504,7 +523,7 @@ fn mirAddSubtractImmediate(emit: *Emit, inst: Mir.Inst.Index) !void {
}
}
-fn mirShiftRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
+fn mirDataProcessing2Source(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const rrr = emit.mir.instructions.items(.data)[inst].rrr;
const rd = rrr.rd;
@@ -515,6 +534,8 @@ fn mirShiftRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
.asr_register => try emit.writeInstruction(Instruction.asrRegister(rd, rn, rm)),
.lsl_register => try emit.writeInstruction(Instruction.lslRegister(rd, rn, rm)),
.lsr_register => try emit.writeInstruction(Instruction.lsrRegister(rd, rn, rm)),
+ .sdiv => try emit.writeInstruction(Instruction.sdiv(rd, rn, rm)),
+ .udiv => try emit.writeInstruction(Instruction.udiv(rd, rn, rm)),
else => unreachable,
}
}
@@ -920,6 +941,67 @@ fn mirLoadStoreRegisterPair(emit: *Emit, inst: Mir.Inst.Index) !void {
}
}
+fn mirLoadStackArgument(emit: *Emit, inst: Mir.Inst.Index) !void {
+ const tag = emit.mir.instructions.items(.tag)[inst];
+ const load_store_stack = emit.mir.instructions.items(.data)[inst].load_store_stack;
+ const rt = load_store_stack.rt;
+
+ const raw_offset = emit.stack_size + emit.saved_regs_stack_space + load_store_stack.offset;
+ switch (tag) {
+ .ldr_ptr_stack_argument => {
+ const offset = if (math.cast(u12, raw_offset)) |imm| imm else {
+ return emit.fail("TODO load stack argument ptr with larger offset", .{});
+ };
+
+ switch (tag) {
+ .ldr_ptr_stack_argument => try emit.writeInstruction(Instruction.add(rt, .sp, offset, false)),
+ else => unreachable,
+ }
+ },
+ .ldrb_stack_argument, .ldrsb_stack_argument => {
+ const offset = if (math.cast(u12, raw_offset)) |imm| Instruction.LoadStoreOffset.imm(imm) else {
+ return emit.fail("TODO load stack argument byte with larger offset", .{});
+ };
+
+ switch (tag) {
+ .ldrb_stack_argument => try emit.writeInstruction(Instruction.ldrb(rt, .sp, offset)),
+ .ldrsb_stack_argument => try emit.writeInstruction(Instruction.ldrsb(rt, .sp, offset)),
+ else => unreachable,
+ }
+ },
+ .ldrh_stack_argument, .ldrsh_stack_argument => {
+ assert(std.mem.isAlignedGeneric(u32, raw_offset, 2)); // misaligned stack entry
+ const offset = if (math.cast(u12, @divExact(raw_offset, 2))) |imm| Instruction.LoadStoreOffset.imm(imm) else {
+ return emit.fail("TODO load stack argument halfword with larger offset", .{});
+ };
+
+ switch (tag) {
+ .ldrh_stack_argument => try emit.writeInstruction(Instruction.ldrh(rt, .sp, offset)),
+ .ldrsh_stack_argument => try emit.writeInstruction(Instruction.ldrsh(rt, .sp, offset)),
+ else => unreachable,
+ }
+ },
+ .ldr_stack_argument => {
+ const alignment: u32 = switch (rt.size()) {
+ 32 => 4,
+ 64 => 8,
+ else => unreachable,
+ };
+
+ assert(std.mem.isAlignedGeneric(u32, raw_offset, alignment)); // misaligned stack entry
+ const offset = if (math.cast(u12, @divExact(raw_offset, alignment))) |imm| Instruction.LoadStoreOffset.imm(imm) else {
+ return emit.fail("TODO load stack argument with larger offset", .{});
+ };
+
+ switch (tag) {
+ .ldr_stack_argument => try emit.writeInstruction(Instruction.ldr(rt, .sp, offset)),
+ else => unreachable,
+ }
+ },
+ else => unreachable,
+ }
+}
+
fn mirLoadStoreStack(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const load_store_stack = emit.mir.instructions.items(.data)[inst].load_store_stack;
@@ -1059,14 +1141,31 @@ fn mirMoveWideImmediate(emit: *Emit, inst: Mir.Inst.Index) !void {
fn mirDataProcessing3Source(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
- const rrr = emit.mir.instructions.items(.data)[inst].rrr;
switch (tag) {
- .mul => try emit.writeInstruction(Instruction.mul(rrr.rd, rrr.rn, rrr.rm)),
- .smulh => try emit.writeInstruction(Instruction.smulh(rrr.rd, rrr.rn, rrr.rm)),
- .smull => try emit.writeInstruction(Instruction.smull(rrr.rd, rrr.rn, rrr.rm)),
- .umulh => try emit.writeInstruction(Instruction.umulh(rrr.rd, rrr.rn, rrr.rm)),
- .umull => try emit.writeInstruction(Instruction.umull(rrr.rd, rrr.rn, rrr.rm)),
+ .mul,
+ .smulh,
+ .smull,
+ .umulh,
+ .umull,
+ => {
+ const rrr = emit.mir.instructions.items(.data)[inst].rrr;
+ switch (tag) {
+ .mul => try emit.writeInstruction(Instruction.mul(rrr.rd, rrr.rn, rrr.rm)),
+ .smulh => try emit.writeInstruction(Instruction.smulh(rrr.rd, rrr.rn, rrr.rm)),
+ .smull => try emit.writeInstruction(Instruction.smull(rrr.rd, rrr.rn, rrr.rm)),
+ .umulh => try emit.writeInstruction(Instruction.umulh(rrr.rd, rrr.rn, rrr.rm)),
+ .umull => try emit.writeInstruction(Instruction.umull(rrr.rd, rrr.rn, rrr.rm)),
+ else => unreachable,
+ }
+ },
+ .msub => {
+ const rrrr = emit.mir.instructions.items(.data)[inst].rrrr;
+ switch (tag) {
+ .msub => try emit.writeInstruction(Instruction.msub(rrrr.rd, rrrr.rn, rrrr.rm, rrrr.ra)),
+ else => unreachable,
+ }
+ },
else => unreachable,
}
}
@@ -1084,7 +1183,7 @@ fn mirPushPopRegs(emit: *Emit, inst: Mir.Inst.Index) !void {
// sp must be aligned at all times, so we only use stp and ldp
// instructions for minimal instruction count. However, if we do
// not have an even number of registers, we use str and ldr
- const number_of_regs = @popCount(u32, reg_list);
+ const number_of_regs = @popCount(reg_list);
switch (tag) {
.pop_regs => {
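The new `mirLoadStackArgument` addresses incoming arguments relative to `sp` rather than `fp`: the callee's own frame (`stack_size`) and the saved callee-saved registers sit between `sp` and the caller's outgoing-argument area, hence `raw_offset = stack_size + saved_regs_stack_space + offset`. A sketch of the assumed frame layout (the ARM `Emit.zig` hunk below applies the same formula):

```zig
// Assumed frame layout after the prologue (addresses grow upward):
//
//   [ incoming stack arguments ] <- sp + stack_size + saved_regs_stack_space
//   [ saved callee-saved regs  ] <- sp + stack_size
//   [ locals / spill slots     ] <- sp
//
// Hypothetical helper mirroring the raw_offset computation above:
fn stackArgumentOffset(stack_size: u32, saved_regs_stack_space: u32, arg_offset: u32) u32 {
    return stack_size + saved_regs_stack_space + arg_offset;
}
```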
diff --git a/src/arch/aarch64/Mir.zig b/src/arch/aarch64/Mir.zig
index 2fef069f7a..00537e0e38 100644
--- a/src/arch/aarch64/Mir.zig
+++ b/src/arch/aarch64/Mir.zig
@@ -92,20 +92,28 @@ pub const Inst = struct {
load_memory_ptr_direct,
/// Load Pair of Registers
ldp,
+ /// Pseudo-instruction: Load pointer to stack argument
+ ldr_ptr_stack_argument,
/// Pseudo-instruction: Load from stack
ldr_stack,
+ /// Pseudo-instruction: Load from stack argument
+ ldr_stack_argument,
/// Load Register (immediate)
ldr_immediate,
/// Load Register (register)
ldr_register,
/// Pseudo-instruction: Load byte from stack
ldrb_stack,
+ /// Pseudo-instruction: Load byte from stack argument
+ ldrb_stack_argument,
/// Load Register Byte (immediate)
ldrb_immediate,
/// Load Register Byte (register)
ldrb_register,
/// Pseudo-instruction: Load halfword from stack
ldrh_stack,
+ /// Pseudo-instruction: Load halfword from stack argument
+ ldrh_stack_argument,
/// Load Register Halfword (immediate)
ldrh_immediate,
/// Load Register Halfword (register)
@@ -114,10 +122,14 @@ pub const Inst = struct {
ldrsb_immediate,
/// Pseudo-instruction: Load signed byte from stack
ldrsb_stack,
+ /// Pseudo-instruction: Load signed byte from stack argument
+ ldrsb_stack_argument,
/// Load Register Signed Halfword (immediate)
ldrsh_immediate,
/// Pseudo-instruction: Load signed halfword from stack
ldrsh_stack,
+ /// Pseudo-instruction: Load signed halfword from stack argument
+ ldrsh_stack_argument,
/// Load Register Signed Word (immediate)
ldrsw_immediate,
/// Logical Shift Left (immediate)
@@ -136,6 +148,8 @@ pub const Inst = struct {
movk,
/// Move wide with zero
movz,
+ /// Multiply-subtract
+ msub,
/// Multiply
mul,
/// Bitwise NOT
@@ -152,6 +166,8 @@ pub const Inst = struct {
ret,
/// Signed bitfield extract
sbfx,
+ /// Signed divide
+ sdiv,
/// Signed multiply high
smulh,
/// Signed multiply long
@@ -200,6 +216,8 @@ pub const Inst = struct {
tst_immediate,
/// Unsigned bitfield extract
ubfx,
+ /// Unsigned divide
+ udiv,
/// Unsigned multiply high
umulh,
/// Unsigned multiply long
@@ -430,6 +448,15 @@ pub const Inst = struct {
rn: Register,
offset: bits.Instruction.LoadStorePairOffset,
},
+ /// Four registers
+ ///
+ /// Used by e.g. msub
+ rrrr: struct {
+ rd: Register,
+ rn: Register,
+ rm: Register,
+ ra: Register,
+ },
/// Debug info: line and column
///
/// Used by e.g. dbg_line
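The new `sdiv`/`udiv` tags together with `msub` (and its four-register `rrrr` payload) are the standard AArch64 building blocks for integer division and remainder; the ISA has no remainder instruction, so `n % d` is conventionally computed as `n - (n / d) * d`. A sketch of the identity (whether the backend emits exactly this pair is not shown in this hunk):

```zig
// sdiv x2, x0, x1      ; x2 = x0 / x1 (truncated)
// msub x3, x2, x1, x0  ; x3 = x0 - x2 * x1 = x0 % x1
//
// i.e. msub(rd, rn, rm, ra) computes ra - rn * rm.
fn remViaMsub(n: i64, d: i64) i64 {
    const q = @divTrunc(n, d); // what sdiv produces
    return n - q * d; // what msub produces with ra = n
}
```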
diff --git a/src/arch/aarch64/bits.zig b/src/arch/aarch64/bits.zig
index a3f5fbac51..ad45661b70 100644
--- a/src/arch/aarch64/bits.zig
+++ b/src/arch/aarch64/bits.zig
@@ -1698,6 +1698,14 @@ pub const Instruction = union(enum) {
// Data processing (2 source)
+ pub fn udiv(rd: Register, rn: Register, rm: Register) Instruction {
+ return dataProcessing2Source(0b0, 0b000010, rd, rn, rm);
+ }
+
+ pub fn sdiv(rd: Register, rn: Register, rm: Register) Instruction {
+ return dataProcessing2Source(0b0, 0b000011, rd, rn, rm);
+ }
+
pub fn lslv(rd: Register, rn: Register, rm: Register) Instruction {
return dataProcessing2Source(0b0, 0b001000, rd, rn, rm);
}
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 93d98c41d3..e8f0507614 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -247,6 +247,31 @@ const BigTomb = struct {
log.debug("%{d} => {}", .{ bt.inst, result });
const branch = &bt.function.branch_stack.items[bt.function.branch_stack.items.len - 1];
branch.inst_table.putAssumeCapacityNoClobber(bt.inst, result);
+
+ switch (result) {
+ .register => |reg| {
+ // In some cases (such as bitcast), an operand
+ // may be the same MCValue as the result. If
+ // that operand died and was a register, it
+ // was freed by processDeath. We have to
+ // "re-allocate" the register.
+ if (bt.function.register_manager.isRegFree(reg)) {
+ bt.function.register_manager.getRegAssumeFree(reg, bt.inst);
+ }
+ },
+ .register_c_flag,
+ .register_v_flag,
+ => |reg| {
+ if (bt.function.register_manager.isRegFree(reg)) {
+ bt.function.register_manager.getRegAssumeFree(reg, bt.inst);
+ }
+ bt.function.cpsr_flags_inst = bt.inst;
+ },
+ .cpsr_flags => {
+ bt.function.cpsr_flags_inst = bt.inst;
+ },
+ else => {},
+ }
}
bt.function.finishAirBookkeeping();
}
@@ -332,7 +357,7 @@ pub fn generate(
};
for (function.dbg_arg_relocs.items) |reloc| {
- try function.genArgDbgInfo(reloc.inst, reloc.index, call_info.stack_byte_count);
+ try function.genArgDbgInfo(reloc.inst, reloc.index);
}
var mir = Mir{
@@ -351,7 +376,8 @@ pub fn generate(
.prev_di_pc = 0,
.prev_di_line = module_fn.lbrace_line,
.prev_di_column = module_fn.lbrace_column,
- .prologue_stack_space = call_info.stack_byte_count + function.saved_regs_stack_space,
+ .stack_size = function.max_end_stack,
+ .saved_regs_stack_space = function.saved_regs_stack_space,
};
defer emit.deinit();
@@ -464,6 +490,7 @@ fn gen(self: *Self) !void {
const total_stack_size = self.max_end_stack + self.saved_regs_stack_space;
const aligned_total_stack_end = mem.alignForwardGeneric(u32, total_stack_size, self.stack_align);
const stack_size = aligned_total_stack_end - self.saved_regs_stack_space;
+ self.max_end_stack = stack_size;
if (Instruction.Operand.fromU32(stack_size)) |op| {
self.mir_instructions.set(sub_reloc, .{
.tag = .sub,
@@ -768,6 +795,9 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.float_to_int_optimized,
=> return self.fail("TODO implement optimized float mode", .{}),
+ .is_named_enum_value => return self.fail("TODO implement is_named_enum_value", .{}),
+ .error_set_has_value => return self.fail("TODO implement error_set_has_value", .{}),
+
.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
// zig fmt: on
@@ -1810,7 +1840,7 @@ fn errUnionErr(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCV
switch (error_union_mcv) {
.register => return self.fail("TODO errUnionErr for registers", .{}),
.stack_argument_offset => |off| {
- return MCValue{ .stack_argument_offset = off - err_offset };
+ return MCValue{ .stack_argument_offset = off + err_offset };
},
.stack_offset => |off| {
return MCValue{ .stack_offset = off - err_offset };
@@ -1847,7 +1877,7 @@ fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type)
switch (error_union_mcv) {
.register => return self.fail("TODO errUnionPayload for registers", .{}),
.stack_argument_offset => |off| {
- return MCValue{ .stack_argument_offset = off - payload_offset };
+ return MCValue{ .stack_argument_offset = off + payload_offset };
},
.stack_offset => |off| {
return MCValue{ .stack_offset = off - payload_offset };
@@ -1981,7 +2011,7 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
.dead, .unreach => unreachable,
.register => unreachable, // a slice doesn't fit in one register
.stack_argument_offset => |off| {
- break :result MCValue{ .stack_argument_offset = off - 4 };
+ break :result MCValue{ .stack_argument_offset = off + 4 };
},
.stack_offset => |off| {
break :result MCValue{ .stack_offset = off - 4 };
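This `off - 4` to `off + 4` flip (and the matching flips in `errUnionErr`, `errUnionPayload`, and `airStructFieldVal`) follows from the convention change in `resolveCallingConventionValues` below: `stack_argument_offset` now measures from the start of the argument area to the start of the value, so a field `k` bytes into an argument lives at `off + k`; previously the offset appears to have been measured from the opposite end (note the removed `info.stack_byte_count - offset` in `airCall`). A tiny sketch under that reading:

```zig
// A slice argument recorded at stack_argument_offset `off`; its len field
// sits one 32-bit pointer past the ptr field:
fn sliceLenArgOffset(off: u32) u32 {
    return off + 4; // offsets now count from the start of the argument
}
```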
@@ -2257,16 +2287,17 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
.register_c_flag,
.register_v_flag,
=> unreachable, // cannot hold an address
- .immediate => |imm| try self.setRegOrMem(elem_ty, dst_mcv, .{ .memory = imm }),
- .ptr_stack_offset => |off| try self.setRegOrMem(elem_ty, dst_mcv, .{ .stack_offset = off }),
+ .immediate => |imm| {
+ try self.setRegOrMem(elem_ty, dst_mcv, .{ .memory = imm });
+ },
+ .ptr_stack_offset => |off| {
+ try self.setRegOrMem(elem_ty, dst_mcv, .{ .stack_offset = off });
+ },
.register => |reg| {
const reg_lock = self.register_manager.lockReg(reg);
defer if (reg_lock) |reg_locked| self.register_manager.unlockReg(reg_locked);
switch (dst_mcv) {
- .dead => unreachable,
- .undef => unreachable,
- .cpsr_flags => unreachable,
.register => |dst_reg| {
try self.genLdrRegister(dst_reg, reg, elem_ty);
},
@@ -2302,7 +2333,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
try self.genInlineMemcpy(src_reg, dst_reg, len_reg, count_reg, tmp_reg);
}
},
- else => return self.fail("TODO load from register into {}", .{dst_mcv}),
+ else => unreachable, // attempting to load into an MCValue that is neither a register nor a stack slot
}
},
.memory,
@@ -2399,7 +2430,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
// sub src_reg, fp, #off
try self.genSetReg(ptr_ty, src_reg, .{ .ptr_stack_offset = off });
},
- .memory => |addr| try self.genSetReg(Type.usize, src_reg, .{ .immediate = @intCast(u32, addr) }),
+ .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @intCast(u32, addr) }),
.stack_argument_offset => |off| {
_ = try self.addInst(.{
.tag = .ldr_ptr_stack_argument,
@@ -2505,7 +2536,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
switch (mcv) {
.dead, .unreach => unreachable,
.stack_argument_offset => |off| {
- break :result MCValue{ .stack_argument_offset = off - struct_field_offset };
+ break :result MCValue{ .stack_argument_offset = off + struct_field_offset };
},
.stack_offset => |off| {
break :result MCValue{ .stack_offset = off - struct_field_offset };
@@ -3345,6 +3376,102 @@ fn genInlineMemcpy(
// end:
}
+fn genInlineMemset(
+ self: *Self,
+ dst: MCValue,
+ val: MCValue,
+ len: MCValue,
+) !void {
+ const dst_reg = switch (dst) {
+ .register => |r| r,
+ else => try self.copyToTmpRegister(Type.initTag(.manyptr_u8), dst),
+ };
+ const dst_reg_lock = self.register_manager.lockReg(dst_reg);
+ defer if (dst_reg_lock) |lock| self.register_manager.unlockReg(lock);
+
+ const val_reg = switch (val) {
+ .register => |r| r,
+ else => try self.copyToTmpRegister(Type.initTag(.u8), val),
+ };
+ const val_reg_lock = self.register_manager.lockReg(val_reg);
+ defer if (val_reg_lock) |lock| self.register_manager.unlockReg(lock);
+
+ const len_reg = switch (len) {
+ .register => |r| r,
+ else => try self.copyToTmpRegister(Type.usize, len),
+ };
+ const len_reg_lock = self.register_manager.lockReg(len_reg);
+ defer if (len_reg_lock) |lock| self.register_manager.unlockReg(lock);
+
+ const count_reg = try self.register_manager.allocReg(null, gp);
+
+ try self.genInlineMemsetCode(dst_reg, val_reg, len_reg, count_reg);
+}
+
+fn genInlineMemsetCode(
+ self: *Self,
+ dst: Register,
+ val: Register,
+ len: Register,
+ count: Register,
+) !void {
+ // mov count, #0
+ _ = try self.addInst(.{
+ .tag = .mov,
+ .data = .{ .rr_op = .{
+ .rd = count,
+ .rn = .r0,
+ .op = Instruction.Operand.imm(0, 0),
+ } },
+ });
+
+ // loop:
+ // cmp count, len
+ _ = try self.addInst(.{
+ .tag = .cmp,
+ .data = .{ .rr_op = .{
+ .rd = .r0,
+ .rn = count,
+ .op = Instruction.Operand.reg(len, Instruction.Operand.Shift.none),
+ } },
+ });
+
+ // bge end
+ _ = try self.addInst(.{
+ .tag = .b,
+ .cond = .ge,
+ .data = .{ .inst = @intCast(u32, self.mir_instructions.len + 4) },
+ });
+
+ // strb val, [src, count]
+ _ = try self.addInst(.{
+ .tag = .strb,
+ .data = .{ .rr_offset = .{
+ .rt = val,
+ .rn = dst,
+ .offset = .{ .offset = Instruction.Offset.reg(count, .none) },
+ } },
+ });
+
+ // add count, count, #1
+ _ = try self.addInst(.{
+ .tag = .add,
+ .data = .{ .rr_op = .{
+ .rd = count,
+ .rn = count,
+ .op = Instruction.Operand.imm(1, 0),
+ } },
+ });
+
+ // b loop
+ _ = try self.addInst(.{
+ .tag = .b,
+ .data = .{ .inst = @intCast(u32, self.mir_instructions.len - 4) },
+ });
+
+ // end:
+}
+
/// Adds a Type to the .debug_info at the current position. The bytes will be populated later,
/// after codegen for this symbol is done.
fn addDbgInfoTypeReloc(self: *Self, ty: Type) error{OutOfMemory}!void {
@@ -3367,12 +3494,10 @@ fn addDbgInfoTypeReloc(self: *Self, ty: Type) error{OutOfMemory}!void {
}
}
-fn genArgDbgInfo(self: *Self, inst: Air.Inst.Index, arg_index: u32, stack_byte_count: u32) error{OutOfMemory}!void {
- const prologue_stack_space = stack_byte_count + self.saved_regs_stack_space;
-
+fn genArgDbgInfo(self: *Self, inst: Air.Inst.Index, arg_index: u32) error{OutOfMemory}!void {
const mcv = self.args[arg_index];
const ty = self.air.instructions.items(.data)[inst].ty;
- const name = self.mod_fn.getParamName(arg_index);
+ const name = self.mod_fn.getParamName(self.bin_file.options.module.?, arg_index);
const name_with_null = name.ptr[0 .. name.len + 1];
switch (mcv) {
@@ -3402,7 +3527,7 @@ fn genArgDbgInfo(self: *Self, inst: Air.Inst.Index, arg_index: u32, stack_byte_c
// const abi_size = @intCast(u32, ty.abiSize(self.target.*));
const adjusted_stack_offset = switch (mcv) {
.stack_offset => |offset| -@intCast(i32, offset),
- .stack_argument_offset => |offset| @intCast(i32, prologue_stack_space - offset),
+ .stack_argument_offset => |offset| @intCast(i32, self.saved_regs_stack_space + offset),
else => unreachable,
};
@@ -3522,7 +3647,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
try self.register_manager.getReg(reg, null);
}
- if (info.return_value == .stack_offset) {
+ // If returning by reference, r0 will contain the address where
+ // the result should be written. In that case, make sure that r0
+ // remains untouched by the parameter-passing code.
+ const r0_lock: ?RegisterLock = if (info.return_value == .stack_offset) blk: {
log.debug("airCall: return by reference", .{});
const ret_ty = fn_ty.fnReturnType();
const ret_abi_size = @intCast(u32, ret_ty.abiSize(self.target.*));
@@ -3538,7 +3666,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
try self.genSetReg(ptr_ty, .r0, .{ .ptr_stack_offset = stack_offset });
info.return_value = .{ .stack_offset = stack_offset };
- }
+
+ break :blk self.register_manager.lockRegAssumeUnused(.r0);
+ } else null;
+ defer if (r0_lock) |reg| self.register_manager.unlockReg(reg);
// Make space for the arguments passed via the stack
self.max_end_stack += info.stack_byte_count;
@@ -3557,7 +3688,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
.stack_offset => unreachable,
.stack_argument_offset => |offset| try self.genSetStackArgument(
arg_ty,
- info.stack_byte_count - offset,
+ offset,
arg_mcv,
),
else => unreachable,
@@ -4619,11 +4750,15 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
if (!self.wantSafety())
return; // The already existing value will do just fine.
// TODO Upgrade this to a memset call when we have that available.
- switch (ty.abiSize(self.target.*)) {
- 1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }),
- 2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }),
- 4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }),
- else => return self.fail("TODO implement memset", .{}),
+ switch (abi_size) {
+ 1 => try self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }),
+ 2 => try self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }),
+ 4 => try self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }),
+ else => try self.genInlineMemset(
+ .{ .ptr_stack_offset = stack_offset },
+ .{ .immediate = 0xaa },
+ .{ .immediate = abi_size },
+ ),
}
},
.cpsr_flags,
@@ -5035,9 +5170,9 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
return; // The already existing value will do just fine.
// TODO Upgrade this to a memset call when we have that available.
switch (abi_size) {
- 1 => return self.genSetStackArgument(ty, stack_offset, .{ .immediate = 0xaa }),
- 2 => return self.genSetStackArgument(ty, stack_offset, .{ .immediate = 0xaaaa }),
- 4 => return self.genSetStackArgument(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }),
+ 1 => try self.genSetStackArgument(ty, stack_offset, .{ .immediate = 0xaa }),
+ 2 => try self.genSetStackArgument(ty, stack_offset, .{ .immediate = 0xaaaa }),
+ 4 => try self.genSetStackArgument(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }),
else => return self.fail("TODO implement memset", .{}),
}
},
@@ -5651,8 +5786,8 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
if (ty.abiAlignment(self.target.*) == 8)
nsaa = std.mem.alignForwardGeneric(u32, nsaa, 8);
- nsaa += param_size;
result.args[i] = .{ .stack_argument_offset = nsaa };
+ nsaa += param_size;
}
}
@@ -5685,9 +5820,11 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
for (param_types) |ty, i| {
if (ty.abiSize(self.target.*) > 0) {
const param_size = @intCast(u32, ty.abiSize(self.target.*));
+ const param_alignment = ty.abiAlignment(self.target.*);
- stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, ty.abiAlignment(self.target.*)) + param_size;
+ stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment);
result.args[i] = .{ .stack_argument_offset = stack_offset };
+ stack_offset += param_size;
} else {
result.args[i] = .{ .none = {} };
}
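`genInlineMemsetCode` above emits a plain counting loop; it is used, among other things, to fill `undef` stack slots larger than 4 bytes with 0xaa in safe builds. Its semantics in ordinary Zig, as a sketch:

```zig
fn inlineMemset(dst: [*]u8, val: u8, len: usize) void {
    var count: usize = 0; // mov count, #0
    while (count < len) : (count += 1) { // loop: cmp count, len / bge end
        dst[count] = val; // strb val, [dst, count]
    } // add count, count, #1 / b loop
}
```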
diff --git a/src/arch/arm/Emit.zig b/src/arch/arm/Emit.zig
index 47d508b34a..cf749792f0 100644
--- a/src/arch/arm/Emit.zig
+++ b/src/arch/arm/Emit.zig
@@ -33,9 +33,13 @@ prev_di_column: u32,
/// Relative to the beginning of `code`.
prev_di_pc: usize,
-/// The amount of stack space consumed by all stack arguments as well
-/// as the saved callee-saved registers
-prologue_stack_space: u32,
+/// The amount of stack space consumed by the saved callee-saved
+/// registers in bytes
+saved_regs_stack_space: u32,
+
+/// The final stack frame size of the function (already aligned to the
+/// respective stack alignment). Does not include prologue stack space.
+stack_size: u32,
/// The branch type of every branch
branch_types: std.AutoHashMapUnmanaged(Mir.Inst.Index, BranchType) = .{},
@@ -500,14 +504,15 @@ fn mirLoadStackArgument(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const cond = emit.mir.instructions.items(.cond)[inst];
const r_stack_offset = emit.mir.instructions.items(.data)[inst].r_stack_offset;
+ const rt = r_stack_offset.rt;
- const raw_offset = emit.prologue_stack_space - r_stack_offset.stack_offset;
+ const raw_offset = emit.stack_size + emit.saved_regs_stack_space + r_stack_offset.stack_offset;
switch (tag) {
.ldr_ptr_stack_argument => {
const operand = Instruction.Operand.fromU32(raw_offset) orelse
return emit.fail("TODO mirLoadStack larger offsets", .{});
- try emit.writeInstruction(Instruction.add(cond, r_stack_offset.rt, .fp, operand));
+ try emit.writeInstruction(Instruction.add(cond, rt, .sp, operand));
},
.ldr_stack_argument,
.ldrb_stack_argument,
@@ -516,23 +521,11 @@ fn mirLoadStackArgument(emit: *Emit, inst: Mir.Inst.Index) !void {
break :blk Instruction.Offset.imm(@intCast(u12, raw_offset));
} else return emit.fail("TODO mirLoadStack larger offsets", .{});
- const ldr = switch (tag) {
- .ldr_stack_argument => &Instruction.ldr,
- .ldrb_stack_argument => &Instruction.ldrb,
+ switch (tag) {
+ .ldr_stack_argument => try emit.writeInstruction(Instruction.ldr(cond, rt, .sp, .{ .offset = offset })),
+ .ldrb_stack_argument => try emit.writeInstruction(Instruction.ldrb(cond, rt, .sp, .{ .offset = offset })),
else => unreachable,
- };
-
- const ldr_workaround = switch (builtin.zig_backend) {
- .stage1 => ldr.*,
- else => ldr,
- };
-
- try emit.writeInstruction(ldr_workaround(
- cond,
- r_stack_offset.rt,
- .fp,
- .{ .offset = offset },
- ));
+ }
},
.ldrh_stack_argument,
.ldrsb_stack_argument,
@@ -542,24 +535,12 @@ fn mirLoadStackArgument(emit: *Emit, inst: Mir.Inst.Index) !void {
break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, raw_offset));
} else return emit.fail("TODO mirLoadStack larger offsets", .{});
- const ldr = switch (tag) {
- .ldrh_stack_argument => &Instruction.ldrh,
- .ldrsb_stack_argument => &Instruction.ldrsb,
- .ldrsh_stack_argument => &Instruction.ldrsh,
+ switch (tag) {
+ .ldrh_stack_argument => try emit.writeInstruction(Instruction.ldrh(cond, rt, .sp, .{ .offset = offset })),
+ .ldrsb_stack_argument => try emit.writeInstruction(Instruction.ldrsb(cond, rt, .sp, .{ .offset = offset })),
+ .ldrsh_stack_argument => try emit.writeInstruction(Instruction.ldrsh(cond, rt, .sp, .{ .offset = offset })),
else => unreachable,
- };
-
- const ldr_workaround = switch (builtin.zig_backend) {
- .stage1 => ldr.*,
- else => ldr,
- };
-
- try emit.writeInstruction(ldr_workaround(
- cond,
- r_stack_offset.rt,
- .fp,
- .{ .offset = offset },
- ));
+ }
},
else => unreachable,
}
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index 220fb18699..06adcff6d4 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -693,6 +693,9 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.float_to_int_optimized,
=> return self.fail("TODO implement optimized float mode", .{}),
+ .is_named_enum_value => return self.fail("TODO implement is_named_enum_value", .{}),
+ .error_set_has_value => return self.fail("TODO implement error_set_has_value", .{}),
+
.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
// zig fmt: on
@@ -1619,7 +1622,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
fn genArgDbgInfo(self: *Self, inst: Air.Inst.Index, mcv: MCValue, arg_index: u32) !void {
const ty = self.air.instructions.items(.data)[inst].ty;
- const name = self.mod_fn.getParamName(arg_index);
+ const name = self.mod_fn.getParamName(self.bin_file.options.module.?, arg_index);
const name_with_null = name.ptr[0 .. name.len + 1];
switch (mcv) {
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index 2c6a322fca..cd891f0fa3 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -705,6 +705,9 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.float_to_int_optimized,
=> @panic("TODO implement optimized float mode"),
+ .is_named_enum_value => @panic("TODO implement is_named_enum_value"),
+ .error_set_has_value => @panic("TODO implement error_set_has_value"),
+
.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
// zig fmt: on
@@ -2959,7 +2962,7 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live
fn genArgDbgInfo(self: *Self, inst: Air.Inst.Index, mcv: MCValue, arg_index: u32) !void {
const ty = self.air.instructions.items(.data)[inst].ty;
- const name = self.mod_fn.getParamName(arg_index);
+ const name = self.mod_fn.getParamName(self.bin_file.options.module.?, arg_index);
const name_with_null = name.ptr[0 .. name.len + 1];
switch (mcv) {
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 91072d0b4c..95a0a8e4aa 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -29,6 +29,8 @@ const errUnionErrorOffset = codegen.errUnionErrorOffset;
const WValue = union(enum) {
/// May be referenced but is unused
none: void,
+ /// The value lives on top of the stack
+ stack: void,
/// Index of the local variable
local: u32,
/// An immediate 32bit value
@@ -55,7 +57,7 @@ const WValue = union(enum) {
/// In wasm function pointers are indexes into a function table,
/// rather than an address in the data section.
function_index: u32,
- /// Offset from the bottom of the stack, with the offset
+ /// Offset from the bottom of the virtual stack, with the offset
/// pointing to where the value lives.
stack_offset: u32,
@@ -71,6 +73,38 @@ const WValue = union(enum) {
else => return 0,
}
}
+
+ /// Promotes a `WValue` to a local when the given value is on top of the stack.
+ /// When encountering a `local` or `stack_offset` this is essentially a no-op.
+ /// All other tags are illegal.
+ fn toLocal(value: WValue, gen: *Self, ty: Type) InnerError!WValue {
+ switch (value) {
+ .stack => {
+ const local = try gen.allocLocal(ty);
+ try gen.addLabel(.local_set, local.local);
+ return local;
+ },
+ .local, .stack_offset => return value,
+ else => unreachable,
+ }
+ }
+
+ /// Marks a local as no longer being referenced, which allows us to
+ /// re-use it somewhere else within the function.
+ /// The valtype of the local is deduced from the local's index.
+ fn free(value: *WValue, gen: *Self) void {
+ if (value.* != .local) return;
+ const local_value = value.local;
+ const index = local_value - gen.args.len - @boolToInt(gen.return_value != .none);
+ const valtype = @intToEnum(wasm.Valtype, gen.locals.items[index]);
+ switch (valtype) {
+ .i32 => gen.free_locals_i32.append(gen.gpa, local_value) catch return, // it's ok if any of these appends fail; a new local can be allocated instead
+ .i64 => gen.free_locals_i64.append(gen.gpa, local_value) catch return,
+ .f32 => gen.free_locals_f32.append(gen.gpa, local_value) catch return,
+ .f64 => gen.free_locals_f64.append(gen.gpa, local_value) catch return,
+ }
+ value.* = WValue{ .none = {} };
+ }
};
/// Wasm ops, but without input/output/signedness information
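The new `stack` tag changes the contract between CodeGen helpers: `load`, `binOp`, `cmp`, and friends now leave their result on the wasm operand stack, and the caller either consumes it in place or pins it with `toLocal`, releasing it with `free` once dead. A fragment showing the intended pattern (this shape appears verbatim in `binOpBigInt` further down; it is not compilable on its own since it runs inside a CodeGen method):

```zig
// Pin a loaded value into a (reusable) local when it must survive
// other stack traffic:
var word = try (try self.load(operand, Type.u64, 0)).toLocal(self, Type.u64);
defer word.free(self); // returns the local to the per-valtype free list

// Results that are consumed immediately never touch a local:
_ = try self.load(operand, Type.u64, 8); // left on the wasm operand stack
```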
@@ -601,6 +635,21 @@ stack_size: u32 = 0,
/// However, local variables or the usage of `@setAlignStack` can overwrite this default.
stack_alignment: u32 = 16,
+// For each individual Wasm valtype we store a separate free list which
+// allows us to re-use locals that are no longer used, e.g. temporary locals.
+/// A list of indexes, each of which represents a local of valtype `i32`.
+/// It is illegal to store a non-i32 valtype in this list.
+free_locals_i32: std.ArrayListUnmanaged(u32) = .{},
+/// A list of indexes, each of which represents a local of valtype `i64`.
+/// It is illegal to store a non-i64 valtype in this list.
+free_locals_i64: std.ArrayListUnmanaged(u32) = .{},
+/// A list of indexes, each of which represents a local of valtype `f32`.
+/// It is illegal to store a non-f32 valtype in this list.
+free_locals_f32: std.ArrayListUnmanaged(u32) = .{},
+/// A list of indexes, each of which represents a local of valtype `f64`.
+/// It is illegal to store a non-f64 valtype in this list.
+free_locals_f64: std.ArrayListUnmanaged(u32) = .{},
+
const InnerError = error{
OutOfMemory,
/// An error occurred when trying to lower AIR to MIR.
@@ -759,7 +808,7 @@ fn genBlockType(ty: Type, target: std.Target) u8 {
/// Writes the bytecode depending on the given `WValue` in `val`
fn emitWValue(self: *Self, value: WValue) InnerError!void {
switch (value) {
- .none => {}, // no-op
+ .none, .stack => {}, // no-op
.local => |idx| try self.addLabel(.local_get, idx),
.imm32 => |val| try self.addImm32(@bitCast(i32, val)),
.imm64 => |val| try self.addImm64(val),
@@ -781,9 +830,30 @@ fn emitWValue(self: *Self, value: WValue) InnerError!void {
/// Creates one locals for a given `Type`.
/// Returns a corresponding `Wvalue` with `local` as active tag
fn allocLocal(self: *Self, ty: Type) InnerError!WValue {
+ const valtype = typeToValtype(ty, self.target);
+ switch (valtype) {
+ .i32 => if (self.free_locals_i32.popOrNull()) |index| {
+ return WValue{ .local = index };
+ },
+ .i64 => if (self.free_locals_i64.popOrNull()) |index| {
+ return WValue{ .local = index };
+ },
+ .f32 => if (self.free_locals_f32.popOrNull()) |index| {
+ return WValue{ .local = index };
+ },
+ .f64 => if (self.free_locals_f64.popOrNull()) |index| {
+ return WValue{ .local = index };
+ },
+ }
+ // no local was free to be re-used, so allocate a new local instead
+ return self.ensureAllocLocal(ty);
+}
+
+/// Ensures a new local will be created. This is useful when a
+/// zero-initialized local is required.
+fn ensureAllocLocal(self: *Self, ty: Type) InnerError!WValue {
+ try self.locals.append(self.gpa, genValtype(ty, self.target));
const initial_index = self.local_index;
- const valtype = genValtype(ty, self.target);
- try self.locals.append(self.gpa, valtype);
self.local_index += 1;
return WValue{ .local = initial_index };
}
@@ -1135,9 +1205,9 @@ fn initializeStack(self: *Self) !void {
// Reserve a local to store the current stack pointer
// We can later use this local to set the stack pointer back to the value
// we have stored here.
- self.initial_stack_value = try self.allocLocal(Type.usize);
+ self.initial_stack_value = try self.ensureAllocLocal(Type.usize);
// Also reserve a local to store the bottom stack value
- self.bottom_stack_value = try self.allocLocal(Type.usize);
+ self.bottom_stack_value = try self.ensureAllocLocal(Type.usize);
}
/// Reads the stack pointer from `Context.initial_stack_value` and writes it
@@ -1268,7 +1338,9 @@ fn memcpy(self: *Self, dst: WValue, src: WValue, len: WValue) !void {
else => {
// TODO: We should probably lower this to a call to compiler_rt
// But for now, we implement it manually
- const offset = try self.allocLocal(Type.usize); // local for counter
+ var offset = try self.ensureAllocLocal(Type.usize); // local for counter
+ defer offset.free(self);
+
// outer block to jump to when loop is done
try self.startBlock(.block, wasm.block_empty);
try self.startBlock(.loop, wasm.block_empty);
@@ -1405,7 +1477,7 @@ fn buildPointerOffset(self: *Self, ptr_value: WValue, offset: u64, action: enum
// do not perform arithmetic when offset is 0.
if (offset == 0 and ptr_value.offset() == 0 and action == .modify) return ptr_value;
const result_ptr: WValue = switch (action) {
- .new => try self.allocLocal(Type.usize),
+ .new => try self.ensureAllocLocal(Type.usize),
.modify => ptr_value,
};
try self.emitWValue(ptr_value);
@@ -1621,6 +1693,8 @@ fn genInst(self: *Self, inst: Air.Inst.Index) !WValue {
.tag_name,
.err_return_trace,
.set_err_return_trace,
+ .is_named_enum_value,
+ .error_set_has_value,
=> |tag| return self.fail("TODO: Implement wasm inst: {s}", .{@tagName(tag)}),
.add_optimized,
@@ -1652,7 +1726,10 @@ fn genInst(self: *Self, inst: Air.Inst.Index) !WValue {
fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
for (body) |inst| {
const result = try self.genInst(inst);
- try self.values.putNoClobber(self.gpa, Air.indexToRef(inst), result);
+ if (result != .none) {
+ assert(result != .stack); // not allowed to store stack values as we cannot keep track of where they are on the stack
+ try self.values.putNoClobber(self.gpa, Air.indexToRef(inst), result);
+ }
}
}
@@ -1726,8 +1803,8 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const fn_info = self.decl.ty.fnInfo();
if (!firstParamSRet(fn_info.cc, fn_info.return_type, self.target)) {
- const result = try self.load(operand, ret_ty, 0);
- try self.emitWValue(result);
+ // leave on the stack
+ _ = try self.load(operand, ret_ty, 0);
}
try self.restoreStackPointer();
@@ -1846,6 +1923,7 @@ fn airStore(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
}
fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerError!void {
+ assert(!(lhs != .stack and rhs == .stack));
switch (ty.zigTypeTag()) {
.ErrorUnion => {
const pl_ty = ty.errorUnionPayload();
@@ -1879,20 +1957,26 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro
.Pointer => {
if (ty.isSlice()) {
// store pointer first
+ // lower it to the stack so we do not have to store rhs into a local first
+ try self.emitWValue(lhs);
const ptr_local = try self.load(rhs, Type.usize, 0);
- try self.store(lhs, ptr_local, Type.usize, 0);
+ try self.store(.{ .stack = {} }, ptr_local, Type.usize, 0 + lhs.offset());
// retrieve length from rhs, and store that alongside lhs as well
+ try self.emitWValue(lhs);
const len_local = try self.load(rhs, Type.usize, self.ptrSize());
- try self.store(lhs, len_local, Type.usize, self.ptrSize());
+ try self.store(.{ .stack = {} }, len_local, Type.usize, self.ptrSize() + lhs.offset());
return;
}
},
.Int => if (ty.intInfo(self.target).bits > 64) {
+ try self.emitWValue(lhs);
const lsb = try self.load(rhs, Type.u64, 0);
+ try self.store(.{ .stack = {} }, lsb, Type.u64, 0 + lhs.offset());
+
+ try self.emitWValue(lhs);
const msb = try self.load(rhs, Type.u64, 8);
- try self.store(lhs, lsb, Type.u64, 0);
- try self.store(lhs, msb, Type.u64, 8);
+ try self.store(.{ .stack = {} }, msb, Type.u64, 8 + lhs.offset());
return;
},
else => {},
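One invariant recurs throughout this rewrite: when the destination address is pushed manually and `store` is then called with a `.stack` lhs, the destination's own `offset()` must be folded into the immediate, because a bare `.stack` value carries no offset. As a fragment (mirroring the slice case above):

```zig
try self.emitWValue(lhs); // push the destination address
const ptr_word = try self.load(rhs, Type.usize, 0); // result left on the stack
// fold lhs.offset() into the immediate; a bare .stack lhs has none
try self.store(.{ .stack = {} }, ptr_word, Type.usize, 0 + lhs.offset());
```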
@@ -1931,9 +2015,12 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
return new_local;
}
- return self.load(operand, ty, 0);
+ const stack_loaded = try self.load(operand, ty, 0);
+ return stack_loaded.toLocal(self, ty);
}
+/// Loads an operand from the linear memory section.
+/// NOTE: Leaves the value on the stack.
fn load(self: *Self, operand: WValue, ty: Type, offset: u32) InnerError!WValue {
// load local's value from memory by its stack position
try self.emitWValue(operand);
@@ -1951,10 +2038,7 @@ fn load(self: *Self, operand: WValue, ty: Type, offset: u32) InnerError!WValue {
.{ .offset = offset + operand.offset(), .alignment = ty.abiAlignment(self.target) },
);
- // store the result in a local
- const result = try self.allocLocal(ty);
- try self.addLabel(.local_set, result.local);
- return result;
+ return WValue{ .stack = {} };
}
fn airArg(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -1991,7 +2075,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
switch (self.debug_output) {
.dwarf => |dwarf| {
// TODO: Get the original arg index rather than wasm arg index
- const name = self.mod_fn.getParamName(arg_index);
+ const name = self.mod_fn.getParamName(self.bin_file.base.options.module.?, arg_index);
const leb_size = link.File.Wasm.getULEB128Size(arg.local);
const dbg_info = &dwarf.dbg_info;
try dbg_info.ensureUnusedCapacity(3 + leb_size + 5 + name.len + 1);
@@ -2024,10 +2108,14 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!WValue {
const rhs = try self.resolveInst(bin_op.rhs);
const ty = self.air.typeOf(bin_op.lhs);
- return self.binOp(lhs, rhs, ty, op);
+ const stack_value = try self.binOp(lhs, rhs, ty, op);
+ return stack_value.toLocal(self, ty);
}
+/// Performs a binary operation on the given `WValue`s.
+/// NOTE: This leaves the value on top of the stack.
fn binOp(self: *Self, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
+ assert(!(lhs != .stack and rhs == .stack));
if (isByRef(ty, self.target)) {
if (ty.zigTypeTag() == .Int) {
return self.binOpBigInt(lhs, rhs, ty, op);
@@ -2053,24 +2141,18 @@ fn binOp(self: *Self, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WVa
try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
- // save the result in a temporary
- const bin_local = try self.allocLocal(ty);
- try self.addLabel(.local_set, bin_local.local);
- return bin_local;
+ return WValue{ .stack = {} };
}
+/// Performs a binary operation for 16-bit floats.
+/// NOTE: Leaves the result value on the stack
fn binOpFloat16(self: *Self, lhs: WValue, rhs: WValue, op: Op) InnerError!WValue {
- const ext_lhs = try self.fpext(lhs, Type.f16, Type.f32);
- const ext_rhs = try self.fpext(rhs, Type.f16, Type.f32);
-
const opcode: wasm.Opcode = buildOpcode(.{ .op = op, .valtype1 = .f32, .signedness = .unsigned });
- try self.emitWValue(ext_lhs);
- try self.emitWValue(ext_rhs);
+ _ = try self.fpext(lhs, Type.f16, Type.f32);
+ _ = try self.fpext(rhs, Type.f16, Type.f32);
try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
- // re-use temporary local
- try self.addLabel(.local_set, ext_lhs.local);
- return self.fptrunc(ext_lhs, Type.f32, Type.f16);
+ return self.fptrunc(.{ .stack = {} }, Type.f32, Type.f16);
}
fn binOpBigInt(self: *Self, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
@@ -2083,13 +2165,16 @@ fn binOpBigInt(self: *Self, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerErr
}
const result = try self.allocStack(ty);
- const lhs_high_bit = try self.load(lhs, Type.u64, 0);
- const lhs_low_bit = try self.load(lhs, Type.u64, 8);
- const rhs_high_bit = try self.load(rhs, Type.u64, 0);
- const rhs_low_bit = try self.load(rhs, Type.u64, 8);
+ var lhs_high_bit = try (try self.load(lhs, Type.u64, 0)).toLocal(self, Type.u64);
+ defer lhs_high_bit.free(self);
+ var rhs_high_bit = try (try self.load(rhs, Type.u64, 0)).toLocal(self, Type.u64);
+ defer rhs_high_bit.free(self);
+ var high_op_res = try (try self.binOp(lhs_high_bit, rhs_high_bit, Type.u64, op)).toLocal(self, Type.u64);
+ defer high_op_res.free(self);
+ const lhs_low_bit = try self.load(lhs, Type.u64, 8);
+ const rhs_low_bit = try self.load(rhs, Type.u64, 8);
const low_op_res = try self.binOp(lhs_low_bit, rhs_low_bit, Type.u64, op);
- const high_op_res = try self.binOp(lhs_high_bit, rhs_high_bit, Type.u64, op);
const lt = if (op == .add) blk: {
break :blk try self.cmp(high_op_res, rhs_high_bit, Type.u64, .lt);
@@ -2097,7 +2182,8 @@ fn binOpBigInt(self: *Self, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerErr
break :blk try self.cmp(lhs_high_bit, rhs_high_bit, Type.u64, .lt);
} else unreachable;
const tmp = try self.intcast(lt, Type.u32, Type.u64);
- const tmp_op = try self.binOp(low_op_res, tmp, Type.u64, op);
+ var tmp_op = try (try self.binOp(low_op_res, tmp, Type.u64, op)).toLocal(self, Type.u64);
+ defer tmp_op.free(self);
try self.store(result, high_op_res, Type.u64, 0);
try self.store(result, tmp_op, Type.u64, 8);
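The rewritten `binOpBigInt` still relies on the classic limb-wise carry trick: for addition the carry out of a limb is `(a + b) < b`, and for subtraction the borrow is `a < b`; the 0/1 comparison result is widened via `intcast` and folded into the other limb. A scalar sketch of the addition case:

```zig
const std = @import("std");

fn add128(a_lo: u64, a_hi: u64, b_lo: u64, b_hi: u64) [2]u64 {
    const lo = a_lo +% b_lo;
    const carry = @boolToInt(lo < b_lo); // wraparound happened iff sum < operand
    const hi = a_hi +% b_hi +% carry;
    return .{ lo, hi };
}

test "carry propagates into the second limb" {
    const r = add128(std.math.maxInt(u64), 0, 1, 0);
    try std.testing.expectEqual(@as(u64, 0), r[0]);
    try std.testing.expectEqual(@as(u64, 1), r[1]);
}
```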
@@ -2114,40 +2200,22 @@ fn airWrapBinOp(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!WValue {
return self.fail("TODO: Implement wrapping arithmetic for vectors", .{});
}
- return self.wrapBinOp(lhs, rhs, ty, op);
+ return (try self.wrapBinOp(lhs, rhs, ty, op)).toLocal(self, ty);
}
+/// Performs a wrapping binary operation.
+/// Asserts that `rhs` is not a stack value when `lhs` is not a stack value either.
+/// NOTE: Leaves the result on the stack when its type is <= 64 bits.
fn wrapBinOp(self: *Self, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
- const bit_size = ty.intInfo(self.target).bits;
- var wasm_bits = toWasmBits(bit_size) orelse {
- return self.fail("TODO: Implement wrapping arithmetic for integers with bitsize: {d}\n", .{bit_size});
- };
-
- if (wasm_bits == 128) {
- const bin_op = try self.binOpBigInt(lhs, rhs, ty, op);
- return self.wrapOperand(bin_op, ty);
- }
-
- const opcode: wasm.Opcode = buildOpcode(.{
- .op = op,
- .valtype1 = typeToValtype(ty, self.target),
- .signedness = if (ty.isSignedInt()) .signed else .unsigned,
- });
-
- try self.emitWValue(lhs);
- try self.emitWValue(rhs);
- try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
- const bin_local = try self.allocLocal(ty);
- try self.addLabel(.local_set, bin_local.local);
-
+ const bin_local = try self.binOp(lhs, rhs, ty, op);
return self.wrapOperand(bin_local, ty);
}
/// Wraps an operand based on a given type's bitsize.
/// Asserts `Type` is <= 128 bits.
+/// NOTE: When the Type is <= 64 bits, leaves the value on top of the stack.
fn wrapOperand(self: *Self, operand: WValue, ty: Type) InnerError!WValue {
assert(ty.abiSize(self.target) <= 16);
- const result_local = try self.allocLocal(ty);
const bitsize = ty.intInfo(self.target).bits;
const wasm_bits = toWasmBits(bitsize) orelse {
return self.fail("TODO: Implement wrapOperand for bitsize '{d}'", .{bitsize});
@@ -2156,14 +2224,15 @@ fn wrapOperand(self: *Self, operand: WValue, ty: Type) InnerError!WValue {
if (wasm_bits == bitsize) return operand;
if (wasm_bits == 128) {
- const msb = try self.load(operand, Type.u64, 0);
+ assert(operand != .stack);
const lsb = try self.load(operand, Type.u64, 8);
const result_ptr = try self.allocStack(ty);
- try self.store(result_ptr, lsb, Type.u64, 8);
+ try self.emitWValue(result_ptr);
+ try self.store(.{ .stack = {} }, lsb, Type.u64, 8 + result_ptr.offset());
const result = (@as(u64, 1) << @intCast(u6, 64 - (wasm_bits - bitsize))) - 1;
try self.emitWValue(result_ptr);
- try self.emitWValue(msb);
+ _ = try self.load(operand, Type.u64, 0);
try self.addImm64(result);
try self.addTag(.i64_and);
try self.addMemArg(.i64_store, .{ .offset = result_ptr.offset(), .alignment = 8 });
@@ -2180,8 +2249,7 @@ fn wrapOperand(self: *Self, operand: WValue, ty: Type) InnerError!WValue {
try self.addTag(.i64_and);
} else unreachable;
- try self.addLabel(.local_set, result_local.local);
- return result_local;
+ return WValue{ .stack = {} };
}
fn lowerParentPtr(self: *Self, ptr_val: Value, ptr_child_ty: Type) InnerError!WValue {
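For integers that fit a single wasm value, `wrapOperand` boils down to masking off everything above the type's bitsize (the `wasm_bits == bitsize` case returns the operand untouched). The arithmetic, assuming bitsize < 64:

```zig
const std = @import("std");

fn wrap(value: u64, bitsize: u6) u64 {
    // mask = 2^bitsize - 1, e.g. a u5 result is wrapped with value & 0x1f
    const mask = (@as(u64, 1) << bitsize) - 1;
    return value & mask;
}

test "wrap to 5 bits" {
    try std.testing.expectEqual(@as(u64, 3), wrap(35, 5)); // 0b100011 -> 0b00011
}
```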
@@ -2593,10 +2661,14 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: std.math.CompareOperator) Inner
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const operand_ty = self.air.typeOf(bin_op.lhs);
- return self.cmp(lhs, rhs, operand_ty, op);
+ return (try self.cmp(lhs, rhs, operand_ty, op)).toLocal(self, Type.u32); // comparison result is always 32 bits
}
+/// Compares two operands.
+/// Asserts that `rhs` is not a stack value when `lhs` is not a stack value either.
+/// NOTE: This leaves the result on top of the stack, rather than in a new local.
fn cmp(self: *Self, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareOperator) InnerError!WValue {
+ assert(!(lhs != .stack and rhs == .stack));
if (ty.zigTypeTag() == .Optional and !ty.optionalReprIsPayload()) {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = ty.optionalChild(&buf);
@@ -2638,15 +2710,12 @@ fn cmp(self: *Self, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareOper
});
try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
- const cmp_tmp = try self.allocLocal(Type.initTag(.i32)); // bool is always i32
- try self.addLabel(.local_set, cmp_tmp.local);
- return cmp_tmp;
+ return WValue{ .stack = {} };
}
+/// Compares 16-bit floats.
+/// NOTE: The result value remains on top of the stack.
fn cmpFloat16(self: *Self, lhs: WValue, rhs: WValue, op: std.math.CompareOperator) InnerError!WValue {
- const ext_lhs = try self.fpext(lhs, Type.f16, Type.f32);
- const ext_rhs = try self.fpext(rhs, Type.f16, Type.f32);
-
const opcode: wasm.Opcode = buildOpcode(.{
.op = switch (op) {
.lt => .lt,
@@ -2659,13 +2728,11 @@ fn cmpFloat16(self: *Self, lhs: WValue, rhs: WValue, op: std.math.CompareOperato
.valtype1 = .f32,
.signedness = .unsigned,
});
- try self.emitWValue(ext_lhs);
- try self.emitWValue(ext_rhs);
+ _ = try self.fpext(lhs, Type.f16, Type.f32);
+ _ = try self.fpext(rhs, Type.f16, Type.f32);
try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
- const result = try self.allocLocal(Type.initTag(.i32)); // bool is always i32
- try self.addLabel(.local_set, result.local);
- return result;
+ return WValue{ .stack = {} };
}
fn airCmpVector(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -2726,21 +2793,23 @@ fn airNot(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
switch (wasm_bits) {
32 => {
const bin_op = try self.binOp(operand, .{ .imm32 = ~@as(u32, 0) }, operand_ty, .xor);
- return self.wrapOperand(bin_op, operand_ty);
+ return (try self.wrapOperand(bin_op, operand_ty)).toLocal(self, operand_ty);
},
64 => {
const bin_op = try self.binOp(operand, .{ .imm64 = ~@as(u64, 0) }, operand_ty, .xor);
- return self.wrapOperand(bin_op, operand_ty);
+ return (try self.wrapOperand(bin_op, operand_ty)).toLocal(self, operand_ty);
},
128 => {
const result_ptr = try self.allocStack(operand_ty);
+ try self.emitWValue(result_ptr);
const msb = try self.load(operand, Type.u64, 0);
- const lsb = try self.load(operand, Type.u64, 8);
-
const msb_xor = try self.binOp(msb, .{ .imm64 = ~@as(u64, 0) }, Type.u64, .xor);
+ try self.store(.{ .stack = {} }, msb_xor, Type.u64, 0 + result_ptr.offset());
+
+ try self.emitWValue(result_ptr);
+ const lsb = try self.load(operand, Type.u64, 8);
const lsb_xor = try self.binOp(lsb, .{ .imm64 = ~@as(u64, 0) }, Type.u64, .xor);
- try self.store(result_ptr, msb_xor, Type.u64, 0);
- try self.store(result_ptr, lsb_xor, Type.u64, 8);
+ try self.store(result_ptr, lsb_xor, Type.u64, 8 + result_ptr.offset());
return result_ptr;
},
else => unreachable,
@@ -2828,7 +2897,8 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
}
}
- return self.load(operand, field_ty, offset);
+ const field = try self.load(operand, field_ty, offset);
+ return field.toLocal(self, field_ty);
}
fn airSwitchBr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -3038,7 +3108,9 @@ fn airUnwrapErrUnionPayload(self: *Self, inst: Air.Inst.Index, op_is_ptr: bool)
if (op_is_ptr or isByRef(payload_ty, self.target)) {
return self.buildPointerOffset(operand, pl_offset, .new);
}
- return self.load(operand, payload_ty, pl_offset);
+
+ const payload = try self.load(operand, payload_ty, pl_offset);
+ return payload.toLocal(self, payload_ty);
}
fn airUnwrapErrUnionError(self: *Self, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!WValue {
@@ -3058,7 +3130,8 @@ fn airUnwrapErrUnionError(self: *Self, inst: Air.Inst.Index, op_is_ptr: bool) In
return operand;
}
- return self.load(operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(payload_ty, self.target)));
+ const error_val = try self.load(operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(payload_ty, self.target)));
+ return error_val.toLocal(self, Type.anyerror);
}
fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -3124,12 +3197,13 @@ fn airIntcast(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
return self.fail("todo Wasm intcast for bitsize > 128", .{});
}
- return self.intcast(operand, operand_ty, ty);
+ return (try self.intcast(operand, operand_ty, ty)).toLocal(self, ty);
}
/// Upcasts or downcasts an integer based on the given and wanted types,
/// and stores the result in a new operand.
/// Asserts type's bitsize <= 128
+/// NOTE: May leave the result on the top of the stack.
fn intcast(self: *Self, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
const given_info = given.intInfo(self.target);
const wanted_info = wanted.intInfo(self.target);
@@ -3152,25 +3226,22 @@ fn intcast(self: *Self, operand: WValue, given: Type, wanted: Type) InnerError!W
} else if (wanted_bits == 128) {
// for 128bit integers we store the integer in the virtual stack, rather than a local
const stack_ptr = try self.allocStack(wanted);
+ try self.emitWValue(stack_ptr);
// for 32 bit integers, we first coerce the value into a 64 bit integer before storing it
// meaning less store operations are required.
const lhs = if (op_bits == 32) blk: {
- const tmp = try self.intcast(
- operand,
- given,
- if (wanted.isSignedInt()) Type.i64 else Type.u64,
- );
- break :blk tmp;
+ break :blk try self.intcast(operand, given, if (wanted.isSignedInt()) Type.i64 else Type.u64);
} else operand;
// store msb first
- try self.store(stack_ptr, lhs, Type.u64, 0);
+ try self.store(.{ .stack = {} }, lhs, Type.u64, 0 + stack_ptr.offset());
// For signed integers we shift msb by 63 (64bit integer - 1 sign bit) and store remaining value
if (wanted.isSignedInt()) {
+ try self.emitWValue(stack_ptr);
const shr = try self.binOp(lhs, .{ .imm64 = 63 }, Type.i64, .shr);
- try self.store(stack_ptr, shr, Type.u64, 8);
+ try self.store(.{ .stack = {} }, shr, Type.u64, 8 + stack_ptr.offset());
} else {
// Ensure memory of lsb is zero'd
try self.store(stack_ptr, .{ .imm64 = 0 }, Type.u64, 8);
@@ -3178,9 +3249,7 @@ fn intcast(self: *Self, operand: WValue, given: Type, wanted: Type) InnerError!W
return stack_ptr;
} else return self.load(operand, wanted, 0);
- const result = try self.allocLocal(wanted);
- try self.addLabel(.local_set, result.local);
- return result;
+ return WValue{ .stack = {} };
}
fn airIsNull(self: *Self, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: enum { value, ptr }) InnerError!WValue {
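The signed branch of the 128-bit `intcast` above derives the second word by arithmetically shifting the first word right by 63, which replicates the sign bit across all 64 bits; the unsigned branch simply stores zero. In plain Zig:

```zig
const std = @import("std");

fn extensionWord(value: i64, signed: bool) u64 {
    if (!signed) return 0; // zero-extension: upper word is all zeros
    return @bitCast(u64, value >> 63); // all ones iff value is negative
}

test "sign word" {
    try std.testing.expectEqual(@as(u64, 0), extensionWord(42, true));
    try std.testing.expectEqual(~@as(u64, 0), extensionWord(-1, true));
}
```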
@@ -3189,9 +3258,12 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: en
const op_ty = self.air.typeOf(un_op);
const optional_ty = if (op_kind == .ptr) op_ty.childType() else op_ty;
- return self.isNull(operand, optional_ty, opcode);
+ const is_null = try self.isNull(operand, optional_ty, opcode);
+ return is_null.toLocal(self, optional_ty);
}
+/// For a given type and operand, checks if it's considered `null`.
+/// NOTE: Leaves the result on the stack
fn isNull(self: *Self, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) InnerError!WValue {
try self.emitWValue(operand);
if (!optional_ty.optionalReprIsPayload()) {
@@ -3208,9 +3280,7 @@ fn isNull(self: *Self, operand: WValue, optional_ty: Type, opcode: wasm.Opcode)
try self.addImm32(0);
try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
- const is_null_tmp = try self.allocLocal(Type.initTag(.i32));
- try self.addLabel(.local_set, is_null_tmp.local);
- return is_null_tmp;
+ return WValue{ .stack = {} };
}
fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -3228,7 +3298,8 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
return self.buildPointerOffset(operand, offset, .new);
}
- return self.load(operand, payload_ty, @intCast(u32, offset));
+ const payload = try self.load(operand, payload_ty, @intCast(u32, offset));
+ return payload.toLocal(self, payload_ty);
}
fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -3331,7 +3402,8 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
- return self.load(operand, Type.usize, self.ptrSize());
+ const len = try self.load(operand, Type.usize, self.ptrSize());
+ return len.toLocal(self, Type.usize);
}
fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -3345,8 +3417,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const elem_size = elem_ty.abiSize(self.target);
// load pointer onto stack
- const slice_ptr = try self.load(slice, Type.usize, 0);
- try self.addLabel(.local_get, slice_ptr.local);
+ _ = try self.load(slice, Type.usize, 0);
// calculate index into slice
try self.emitWValue(index);
@@ -3360,7 +3431,9 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
if (isByRef(elem_ty, self.target)) {
return result;
}
- return self.load(result, elem_ty, 0);
+
+ const elem_val = try self.load(result, elem_ty, 0);
+ return elem_val.toLocal(self, elem_ty);
}
fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -3373,8 +3446,7 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const slice = try self.resolveInst(bin_op.lhs);
const index = try self.resolveInst(bin_op.rhs);
- const slice_ptr = try self.load(slice, Type.usize, 0);
- try self.addLabel(.local_get, slice_ptr.local);
+ _ = try self.load(slice, Type.usize, 0);
// calculate index into slice
try self.emitWValue(index);
@@ -3382,7 +3454,7 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
try self.addTag(.i32_mul);
try self.addTag(.i32_add);
- const result = try self.allocLocal(Type.initTag(.i32));
+ const result = try self.allocLocal(Type.i32);
try self.addLabel(.local_set, result.local);
return result;
}
@@ -3391,7 +3463,8 @@ fn airSlicePtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
if (self.liveness.isUnused(inst)) return WValue{ .none = {} };
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
- return self.load(operand, Type.usize, 0);
+ const ptr = try self.load(operand, Type.usize, 0);
+ return ptr.toLocal(self, Type.usize);
}
fn airTrunc(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -3406,13 +3479,13 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
return self.fail("TODO: Implement wasm integer truncation for integer bitsize: {d}", .{int_info.bits});
}
- const result = try self.intcast(operand, op_ty, wanted_ty);
+ var result = try self.intcast(operand, op_ty, wanted_ty);
const wanted_bits = wanted_ty.intInfo(self.target).bits;
const wasm_bits = toWasmBits(wanted_bits).?;
if (wasm_bits != wanted_bits) {
- return self.wrapOperand(result, wanted_ty);
+ result = try self.wrapOperand(result, wanted_ty);
}
- return result;
+ return result.toLocal(self, wanted_ty);
}
fn airBoolToInt(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -3465,8 +3538,7 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
// load pointer onto the stack
if (ptr_ty.isSlice()) {
- const ptr_local = try self.load(ptr, Type.usize, 0);
- try self.addLabel(.local_get, ptr_local.local);
+ _ = try self.load(ptr, Type.usize, 0);
} else {
try self.lowerToStack(ptr);
}
@@ -3477,12 +3549,15 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
try self.addTag(.i32_mul);
try self.addTag(.i32_add);
- const result = try self.allocLocal(elem_ty);
+ var result = try self.allocLocal(elem_ty);
try self.addLabel(.local_set, result.local);
if (isByRef(elem_ty, self.target)) {
return result;
}
- return self.load(result, elem_ty, 0);
+ defer result.free(self); // only freed on the path where it is not returned, as above
+
+ const elem_val = try self.load(result, elem_ty, 0);
+ return elem_val.toLocal(self, elem_ty);
}
fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -3498,8 +3573,7 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
// load pointer onto the stack
if (ptr_ty.isSlice()) {
- const ptr_local = try self.load(ptr, Type.usize, 0);
- try self.addLabel(.local_get, ptr_local.local);
+ _ = try self.load(ptr, Type.usize, 0);
} else {
try self.lowerToStack(ptr);
}
@@ -3510,7 +3584,7 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
try self.addTag(.i32_mul);
try self.addTag(.i32_add);
- const result = try self.allocLocal(Type.initTag(.i32));
+ const result = try self.allocLocal(Type.i32);
try self.addLabel(.local_set, result.local);
return result;
}
@@ -3598,7 +3672,7 @@ fn memset(self: *Self, ptr: WValue, len: WValue, value: WValue) InnerError!void
else => {
// TODO: We should probably lower this to a call to compiler_rt
// But for now, we implement it manually
- const offset = try self.allocLocal(Type.usize); // local for counter
+ const offset = try self.ensureAllocLocal(Type.usize); // local for counter
// outer block to jump to when loop is done
try self.startBlock(.block, wasm.block_empty);
try self.startBlock(.loop, wasm.block_empty);
@@ -3655,13 +3729,16 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
try self.addTag(.i32_mul);
try self.addTag(.i32_add);
- const result = try self.allocLocal(Type.usize);
+ var result = try self.allocLocal(Type.usize);
try self.addLabel(.local_set, result.local);
if (isByRef(elem_ty, self.target)) {
return result;
}
- return self.load(result, elem_ty, 0);
+ defer result.free(self); // only freed on the path where it is no longer needed and not returned, as above
+
+ const elem_val = try self.load(result, elem_ty, 0);
+ return elem_val.toLocal(self, elem_ty);
}
fn airFloatToInt(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -3684,11 +3761,8 @@ fn airFloatToInt(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
.signedness = if (dest_ty.isSignedInt()) .signed else .unsigned,
});
try self.addTag(Mir.Inst.Tag.fromOpcode(op));
-
- const result = try self.allocLocal(dest_ty);
- try self.addLabel(.local_set, result.local);
-
- return self.wrapOperand(result, dest_ty);
+ const wrapped = try self.wrapOperand(.{ .stack = {} }, dest_ty);
+ return wrapped.toLocal(self, dest_ty);
}
fn airIntToFloat(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -3886,24 +3960,19 @@ fn cmpOptionals(self: *Self, lhs: WValue, rhs: WValue, operand_ty: Type, op: std
const payload_ty = operand_ty.optionalChild(&buf);
const offset = @intCast(u32, operand_ty.abiSize(self.target) - payload_ty.abiSize(self.target));
- const lhs_is_null = try self.isNull(lhs, operand_ty, .i32_eq);
- const rhs_is_null = try self.isNull(rhs, operand_ty, .i32_eq);
-
// We store the final result in here that will be validated
// if the optional is truly equal.
- const result = try self.allocLocal(Type.initTag(.i32));
+ var result = try self.ensureAllocLocal(Type.initTag(.i32));
+ defer result.free(self);
try self.startBlock(.block, wasm.block_empty);
- try self.emitWValue(lhs_is_null);
- try self.emitWValue(rhs_is_null);
+ _ = try self.isNull(lhs, operand_ty, .i32_eq);
+ _ = try self.isNull(rhs, operand_ty, .i32_eq);
try self.addTag(.i32_ne); // inverse so we can exit early
try self.addLabel(.br_if, 0);
- const lhs_pl = try self.load(lhs, payload_ty, offset);
- const rhs_pl = try self.load(rhs, payload_ty, offset);
-
- try self.emitWValue(lhs_pl);
- try self.emitWValue(rhs_pl);
+ _ = try self.load(lhs, payload_ty, offset);
+ _ = try self.load(rhs, payload_ty, offset);
const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, self.target) });
try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
try self.addLabel(.br_if, 0);
@@ -3915,26 +3984,29 @@ fn cmpOptionals(self: *Self, lhs: WValue, rhs: WValue, operand_ty: Type, op: std
try self.emitWValue(result);
try self.addImm32(0);
try self.addTag(if (op == .eq) .i32_ne else .i32_eq);
- try self.addLabel(.local_set, result.local);
- return result;
+ return WValue{ .stack = {} };
}
/// Compares big integers by checking both their high and low bits.
+/// NOTE: Leaves the result of the comparison on top of the stack.
/// TODO: Lower this to compiler_rt call when bitsize > 128
fn cmpBigInt(self: *Self, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
assert(operand_ty.abiSize(self.target) >= 16);
+ assert(!(lhs != .stack and rhs == .stack));
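+ // The assert above enforces operand order: when rhs already lives on the
+ // wasm value stack, lhs must too, or the comparison operands would be
+ // popped in swapped order.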
if (operand_ty.intInfo(self.target).bits > 128) {
return self.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.intInfo(self.target).bits});
}
- const lhs_high_bit = try self.load(lhs, Type.u64, 0);
- const lhs_low_bit = try self.load(lhs, Type.u64, 8);
- const rhs_high_bit = try self.load(rhs, Type.u64, 0);
- const rhs_low_bit = try self.load(rhs, Type.u64, 8);
+ var lhs_high_bit = try (try self.load(lhs, Type.u64, 0)).toLocal(self, Type.u64);
+ defer lhs_high_bit.free(self);
+ var rhs_high_bit = try (try self.load(rhs, Type.u64, 0)).toLocal(self, Type.u64);
+ defer rhs_high_bit.free(self);
switch (op) {
.eq, .neq => {
const xor_high = try self.binOp(lhs_high_bit, rhs_high_bit, Type.u64, .xor);
+ const lhs_low_bit = try self.load(lhs, Type.u64, 8);
+ const rhs_low_bit = try self.load(rhs, Type.u64, 8);
const xor_low = try self.binOp(lhs_low_bit, rhs_low_bit, Type.u64, .xor);
const or_result = try self.binOp(xor_high, xor_low, Type.u64, .@"or");
@@ -3946,20 +4018,17 @@ fn cmpBigInt(self: *Self, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.ma
},
else => {
const ty = if (operand_ty.isSignedInt()) Type.i64 else Type.u64;
- const high_bit_eql = try self.cmp(lhs_high_bit, rhs_high_bit, ty, .eq);
- const high_bit_cmp = try self.cmp(lhs_high_bit, rhs_high_bit, ty, op);
- const low_bit_cmp = try self.cmp(lhs_low_bit, rhs_low_bit, ty, op);
-
- try self.emitWValue(low_bit_cmp);
- try self.emitWValue(high_bit_cmp);
- try self.emitWValue(high_bit_eql);
+ // leave these values on top of the stack for '.select'
+ const lhs_low_bit = try self.load(lhs, Type.u64, 8);
+ const rhs_low_bit = try self.load(rhs, Type.u64, 8);
+ _ = try self.cmp(lhs_low_bit, rhs_low_bit, ty, op);
+ _ = try self.cmp(lhs_high_bit, rhs_high_bit, ty, op);
+ _ = try self.cmp(lhs_high_bit, rhs_high_bit, ty, .eq);
try self.addTag(.select);
},
}
- const result = try self.allocLocal(Type.initTag(.i32));
- try self.addLabel(.local_set, result.local);
- return result;
+ return WValue{ .stack = {} };
}
fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -3999,7 +4068,8 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const offset = if (layout.tag_align < layout.payload_align) blk: {
break :blk @intCast(u32, layout.payload_size);
} else @as(u32, 0);
- return self.load(operand, tag_ty, offset);
+ const tag = try self.load(operand, tag_ty, offset);
+ return tag.toLocal(self, tag_ty);
}
fn airFpext(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -4009,19 +4079,20 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const dest_ty = self.air.typeOfIndex(inst);
const operand = try self.resolveInst(ty_op.operand);
- return self.fpext(operand, self.air.typeOf(ty_op.operand), dest_ty);
+ const extended = try self.fpext(operand, self.air.typeOf(ty_op.operand), dest_ty);
+ return extended.toLocal(self, dest_ty);
}
+/// Extends a float from a given `Type` to a larger wanted `Type`
+/// NOTE: Leaves the result on the stack
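+/// Callers needing a local rather than a stack value spill it, as `airFpext` does above:
+///   const extended = try self.fpext(operand, given, wanted);
+///   return extended.toLocal(self, wanted);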
fn fpext(self: *Self, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
const given_bits = given.floatBits(self.target);
const wanted_bits = wanted.floatBits(self.target);
if (wanted_bits == 64 and given_bits == 32) {
- const result = try self.allocLocal(wanted);
try self.emitWValue(operand);
try self.addTag(.f64_promote_f32);
- try self.addLabel(.local_set, result.local);
- return result;
+ return WValue{ .stack = {} };
} else if (given_bits == 16) {
// call __extendhfsf2(f16) f32
const f32_result = try self.callIntrinsic(
@@ -4035,11 +4106,8 @@ fn fpext(self: *Self, operand: WValue, given: Type, wanted: Type) InnerError!WVa
return f32_result;
}
if (wanted_bits == 64) {
- const result = try self.allocLocal(wanted);
- try self.emitWValue(f32_result);
try self.addTag(.f64_promote_f32);
- try self.addLabel(.local_set, result.local);
- return result;
+ return WValue{ .stack = {} };
}
return self.fail("TODO: Implement 'fpext' for floats with bitsize: {d}", .{wanted_bits});
} else {
@@ -4054,26 +4122,25 @@ fn airFptrunc(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const dest_ty = self.air.typeOfIndex(inst);
const operand = try self.resolveInst(ty_op.operand);
- return self.fptrunc(operand, self.air.typeOf(ty_op.operand), dest_ty);
+ const trunc = try self.fptrunc(operand, self.air.typeOf(ty_op.operand), dest_ty);
+ return trunc.toLocal(self, dest_ty);
}
+/// Truncates a float from a given `Type` to its wanted `Type`
+/// NOTE: The result value remains on the stack
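+/// As with `fpext`, callers spill the stack result when a local is required:
+///   const trunc = try self.fptrunc(operand, given, wanted);
+///   return trunc.toLocal(self, wanted);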
fn fptrunc(self: *Self, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
const given_bits = given.floatBits(self.target);
const wanted_bits = wanted.floatBits(self.target);
if (wanted_bits == 32 and given_bits == 64) {
- const result = try self.allocLocal(wanted);
try self.emitWValue(operand);
try self.addTag(.f32_demote_f64);
- try self.addLabel(.local_set, result.local);
- return result;
+ return WValue{ .stack = {} };
} else if (wanted_bits == 16) {
const op: WValue = if (given_bits == 64) blk: {
- const tmp = try self.allocLocal(Type.f32);
try self.emitWValue(operand);
try self.addTag(.f32_demote_f64);
- try self.addLabel(.local_set, tmp.local);
- break :blk tmp;
+ break :blk WValue{ .stack = {} };
} else operand;
// call __truncsfhf2(f32) f16
@@ -4158,12 +4225,9 @@ fn airPopcount(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
switch (wasm_bits) {
128 => {
- const msb = try self.load(operand, Type.u64, 0);
- const lsb = try self.load(operand, Type.u64, 8);
-
- try self.emitWValue(msb);
+ _ = try self.load(operand, Type.u64, 0);
try self.addTag(.i64_popcnt);
- try self.emitWValue(lsb);
+ _ = try self.load(operand, Type.u64, 8);
try self.addTag(.i64_popcnt);
try self.addTag(.i64_add);
try self.addTag(.i32_wrap_i64);
@@ -4267,24 +4331,26 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!W
// for signed integers, we first apply signed shifts by the difference in bits
// to get the signed value, as we store it internally as 2's complement.
- const lhs = if (wasm_bits != int_info.bits and is_signed) blk: {
- break :blk try self.signAbsValue(lhs_op, lhs_ty);
+ var lhs = if (wasm_bits != int_info.bits and is_signed) blk: {
+ break :blk try (try self.signAbsValue(lhs_op, lhs_ty)).toLocal(self, lhs_ty);
} else lhs_op;
- const rhs = if (wasm_bits != int_info.bits and is_signed) blk: {
- break :blk try self.signAbsValue(rhs_op, lhs_ty);
+ var rhs = if (wasm_bits != int_info.bits and is_signed) blk: {
+ break :blk try (try self.signAbsValue(rhs_op, lhs_ty)).toLocal(self, lhs_ty);
} else rhs_op;
- const bin_op = try self.binOp(lhs, rhs, lhs_ty, op);
- const result = if (wasm_bits != int_info.bits) blk: {
- break :blk try self.wrapOperand(bin_op, lhs_ty);
+ var bin_op = try (try self.binOp(lhs, rhs, lhs_ty, op)).toLocal(self, lhs_ty);
+ defer bin_op.free(self);
+ var result = if (wasm_bits != int_info.bits) blk: {
+ break :blk try (try self.wrapOperand(bin_op, lhs_ty)).toLocal(self, lhs_ty);
} else bin_op;
+ defer result.free(self); // no-op when wasm_bits == int_info.bits
const cmp_op: std.math.CompareOperator = if (op == .sub) .gt else .lt;
const overflow_bit: WValue = if (is_signed) blk: {
if (wasm_bits == int_info.bits) {
const cmp_zero = try self.cmp(rhs, zero, lhs_ty, cmp_op);
const lt = try self.cmp(bin_op, lhs, lhs_ty, .lt);
- break :blk try self.binOp(cmp_zero, lt, Type.u32, .xor); // result of cmp_zero and lt is always 32bit
+ break :blk try self.binOp(cmp_zero, lt, Type.u32, .xor);
}
const abs = try self.signAbsValue(bin_op, lhs_ty);
break :blk try self.cmp(abs, bin_op, lhs_ty, .neq);
@@ -4292,11 +4358,22 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!W
try self.cmp(bin_op, lhs, lhs_ty, cmp_op)
else
try self.cmp(bin_op, result, lhs_ty, .neq);
+ var overflow_local = try overflow_bit.toLocal(self, Type.u32);
+ defer overflow_local.free(self);
const result_ptr = try self.allocStack(self.air.typeOfIndex(inst));
try self.store(result_ptr, result, lhs_ty, 0);
const offset = @intCast(u32, lhs_ty.abiSize(self.target));
- try self.store(result_ptr, overflow_bit, Type.initTag(.u1), offset);
+ try self.store(result_ptr, overflow_local, Type.initTag(.u1), offset);
+
+ // In this case we performed a signAbsValue, which created temporary locals,
+ // so free them here so they can be re-used.
+ // In the other case we must not free them, because that would free the
+ // resolved instructions, which may still be referenced by other instructions.
+ if (wasm_bits != int_info.bits and is_signed) {
+ lhs.free(self);
+ rhs.free(self);
+ }
return result_ptr;
}
@@ -4309,52 +4386,58 @@ fn airAddSubWithOverflowBigInt(self: *Self, lhs: WValue, rhs: WValue, ty: Type,
return self.fail("TODO: Implement @{{add/sub}}WithOverflow for integer bitsize '{d}'", .{int_info.bits});
}
- const lhs_high_bit = try self.load(lhs, Type.u64, 0);
- const lhs_low_bit = try self.load(lhs, Type.u64, 8);
- const rhs_high_bit = try self.load(rhs, Type.u64, 0);
- const rhs_low_bit = try self.load(rhs, Type.u64, 8);
+ var lhs_high_bit = try (try self.load(lhs, Type.u64, 0)).toLocal(self, Type.u64);
+ defer lhs_high_bit.free(self);
+ var lhs_low_bit = try (try self.load(lhs, Type.u64, 8)).toLocal(self, Type.u64);
+ defer lhs_low_bit.free(self);
+ var rhs_high_bit = try (try self.load(rhs, Type.u64, 0)).toLocal(self, Type.u64);
+ defer rhs_high_bit.free(self);
+ var rhs_low_bit = try (try self.load(rhs, Type.u64, 8)).toLocal(self, Type.u64);
+ defer rhs_low_bit.free(self);
- const low_op_res = try self.binOp(lhs_low_bit, rhs_low_bit, Type.u64, op);
- const high_op_res = try self.binOp(lhs_high_bit, rhs_high_bit, Type.u64, op);
+ var low_op_res = try (try self.binOp(lhs_low_bit, rhs_low_bit, Type.u64, op)).toLocal(self, Type.u64);
+ defer low_op_res.free(self);
+ var high_op_res = try (try self.binOp(lhs_high_bit, rhs_high_bit, Type.u64, op)).toLocal(self, Type.u64);
+ defer high_op_res.free(self);
- const lt = if (op == .add) blk: {
- break :blk try self.cmp(high_op_res, lhs_high_bit, Type.u64, .lt);
+ var lt = if (op == .add) blk: {
+ break :blk try (try self.cmp(high_op_res, lhs_high_bit, Type.u64, .lt)).toLocal(self, Type.u32);
} else if (op == .sub) blk: {
- break :blk try self.cmp(lhs_high_bit, rhs_high_bit, Type.u64, .lt);
+ break :blk try (try self.cmp(lhs_high_bit, rhs_high_bit, Type.u64, .lt)).toLocal(self, Type.u32);
} else unreachable;
- const tmp = try self.intcast(lt, Type.u32, Type.u64);
- const tmp_op = try self.binOp(low_op_res, tmp, Type.u64, op);
+ defer lt.free(self);
+ var tmp = try (try self.intcast(lt, Type.u32, Type.u64)).toLocal(self, Type.u64);
+ defer tmp.free(self);
+ var tmp_op = try (try self.binOp(low_op_res, tmp, Type.u64, op)).toLocal(self, Type.u64);
+ defer tmp_op.free(self);
const overflow_bit = if (is_signed) blk: {
- const xor_op = try self.binOp(lhs_low_bit, tmp_op, Type.u64, .xor);
const xor_low = try self.binOp(lhs_low_bit, rhs_low_bit, Type.u64, .xor);
const to_wrap = if (op == .add) wrap: {
break :wrap try self.binOp(xor_low, .{ .imm64 = ~@as(u64, 0) }, Type.u64, .xor);
} else xor_low;
+ const xor_op = try self.binOp(lhs_low_bit, tmp_op, Type.u64, .xor);
const wrap = try self.binOp(to_wrap, xor_op, Type.u64, .@"and");
break :blk try self.cmp(wrap, .{ .imm64 = 0 }, Type.i64, .lt); // i64 because signed
} else blk: {
- const eq = try self.cmp(tmp_op, lhs_low_bit, Type.u64, .eq);
- const op_eq = try self.cmp(tmp_op, lhs_low_bit, Type.u64, if (op == .add) .lt else .gt);
-
const first_arg = if (op == .sub) arg: {
break :arg try self.cmp(high_op_res, lhs_high_bit, Type.u64, .gt);
} else lt;
try self.emitWValue(first_arg);
- try self.emitWValue(op_eq);
- try self.emitWValue(eq);
+ _ = try self.cmp(tmp_op, lhs_low_bit, Type.u64, if (op == .add) .lt else .gt);
+ _ = try self.cmp(tmp_op, lhs_low_bit, Type.u64, .eq);
try self.addTag(.select);
- const overflow_bit = try self.allocLocal(Type.initTag(.u1));
- try self.addLabel(.local_set, overflow_bit.local);
- break :blk overflow_bit;
+ break :blk WValue{ .stack = {} };
};
+ var overflow_local = try overflow_bit.toLocal(self, Type.initTag(.u1));
+ defer overflow_local.free(self);
const result_ptr = try self.allocStack(result_ty);
try self.store(result_ptr, high_op_res, Type.u64, 0);
try self.store(result_ptr, tmp_op, Type.u64, 8);
- try self.store(result_ptr, overflow_bit, Type.initTag(.u1), 16);
+ try self.store(result_ptr, overflow_local, Type.initTag(.u1), 16);
return result_ptr;
}
@@ -4376,24 +4459,31 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
return self.fail("TODO: Implement shl_with_overflow for integer bitsize: {d}", .{int_info.bits});
};
- const shl = try self.binOp(lhs, rhs, lhs_ty, .shl);
- const result = if (wasm_bits != int_info.bits) blk: {
- break :blk try self.wrapOperand(shl, lhs_ty);
+ var shl = try (try self.binOp(lhs, rhs, lhs_ty, .shl)).toLocal(self, lhs_ty);
+ defer shl.free(self);
+ var result = if (wasm_bits != int_info.bits) blk: {
+ break :blk try (try self.wrapOperand(shl, lhs_ty)).toLocal(self, lhs_ty);
} else shl;
+ defer result.free(self); // it's a no-op to free the same local twice (when wasm_bits == int_info.bits)
const overflow_bit = if (wasm_bits != int_info.bits and is_signed) blk: {
+ // emit lhs to the stack so we can also keep 'wrapped' on the stack
+ try self.emitWValue(lhs);
const abs = try self.signAbsValue(shl, lhs_ty);
const wrapped = try self.wrapBinOp(abs, rhs, lhs_ty, .shr);
- break :blk try self.cmp(lhs, wrapped, lhs_ty, .neq);
+ break :blk try self.cmp(.{ .stack = {} }, wrapped, lhs_ty, .neq);
} else blk: {
+ try self.emitWValue(lhs);
const shr = try self.binOp(result, rhs, lhs_ty, .shr);
- break :blk try self.cmp(lhs, shr, lhs_ty, .neq);
+ break :blk try self.cmp(.{ .stack = {} }, shr, lhs_ty, .neq);
};
+ var overflow_local = try overflow_bit.toLocal(self, Type.initTag(.u1));
+ defer overflow_local.free(self);
const result_ptr = try self.allocStack(self.air.typeOfIndex(inst));
try self.store(result_ptr, result, lhs_ty, 0);
const offset = @intCast(u32, lhs_ty.abiSize(self.target));
- try self.store(result_ptr, overflow_bit, Type.initTag(.u1), offset);
+ try self.store(result_ptr, overflow_local, Type.initTag(.u1), offset);
return result_ptr;
}
@@ -4411,7 +4501,9 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
// We store the bit if it's overflowed or not in this. As it's zero-initialized
// we only need to update it if an overflow (or underflow) occurred.
- const overflow_bit = try self.allocLocal(Type.initTag(.u1));
+ var overflow_bit = try self.ensureAllocLocal(Type.initTag(.u1));
+ defer overflow_bit.free(self);
+
const int_info = lhs_ty.intInfo(self.target);
const wasm_bits = toWasmBits(int_info.bits) orelse {
return self.fail("TODO: Implement overflow arithmetic for integer bitsize: {d}", .{int_info.bits});
@@ -4432,49 +4524,49 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const new_ty = if (int_info.signedness == .signed) Type.i64 else Type.u64;
const lhs_upcast = try self.intcast(lhs, lhs_ty, new_ty);
const rhs_upcast = try self.intcast(rhs, lhs_ty, new_ty);
- const bin_op = try self.binOp(lhs_upcast, rhs_upcast, new_ty, .mul);
+ const bin_op = try (try self.binOp(lhs_upcast, rhs_upcast, new_ty, .mul)).toLocal(self, new_ty);
if (int_info.signedness == .unsigned) {
const shr = try self.binOp(bin_op, .{ .imm64 = int_info.bits }, new_ty, .shr);
const wrap = try self.intcast(shr, new_ty, lhs_ty);
- const cmp_res = try self.cmp(wrap, zero, lhs_ty, .neq);
- try self.emitWValue(cmp_res);
+ _ = try self.cmp(wrap, zero, lhs_ty, .neq);
try self.addLabel(.local_set, overflow_bit.local);
break :blk try self.intcast(bin_op, new_ty, lhs_ty);
} else {
- const down_cast = try self.intcast(bin_op, new_ty, lhs_ty);
- const shr = try self.binOp(down_cast, .{ .imm32 = int_info.bits - 1 }, lhs_ty, .shr);
+ const down_cast = try (try self.intcast(bin_op, new_ty, lhs_ty)).toLocal(self, lhs_ty);
+ var shr = try (try self.binOp(down_cast, .{ .imm32 = int_info.bits - 1 }, lhs_ty, .shr)).toLocal(self, lhs_ty);
+ defer shr.free(self);
const shr_res = try self.binOp(bin_op, .{ .imm64 = int_info.bits }, new_ty, .shr);
const down_shr_res = try self.intcast(shr_res, new_ty, lhs_ty);
- const cmp_res = try self.cmp(down_shr_res, shr, lhs_ty, .neq);
- try self.emitWValue(cmp_res);
+ _ = try self.cmp(down_shr_res, shr, lhs_ty, .neq);
try self.addLabel(.local_set, overflow_bit.local);
break :blk down_cast;
}
} else if (int_info.signedness == .signed) blk: {
const lhs_abs = try self.signAbsValue(lhs, lhs_ty);
const rhs_abs = try self.signAbsValue(rhs, lhs_ty);
- const bin_op = try self.binOp(lhs_abs, rhs_abs, lhs_ty, .mul);
+ const bin_op = try (try self.binOp(lhs_abs, rhs_abs, lhs_ty, .mul)).toLocal(self, lhs_ty);
const mul_abs = try self.signAbsValue(bin_op, lhs_ty);
- const cmp_op = try self.cmp(mul_abs, bin_op, lhs_ty, .neq);
- try self.emitWValue(cmp_op);
+ _ = try self.cmp(mul_abs, bin_op, lhs_ty, .neq);
try self.addLabel(.local_set, overflow_bit.local);
break :blk try self.wrapOperand(bin_op, lhs_ty);
} else blk: {
- const bin_op = try self.binOp(lhs, rhs, lhs_ty, .mul);
+ var bin_op = try (try self.binOp(lhs, rhs, lhs_ty, .mul)).toLocal(self, lhs_ty);
+ defer bin_op.free(self);
const shift_imm = if (wasm_bits == 32)
WValue{ .imm32 = int_info.bits }
else
WValue{ .imm64 = int_info.bits };
const shr = try self.binOp(bin_op, shift_imm, lhs_ty, .shr);
- const cmp_op = try self.cmp(shr, zero, lhs_ty, .neq);
- try self.emitWValue(cmp_op);
+ _ = try self.cmp(shr, zero, lhs_ty, .neq);
try self.addLabel(.local_set, overflow_bit.local);
break :blk try self.wrapOperand(bin_op, lhs_ty);
};
+ var bin_op_local = try bin_op.toLocal(self, lhs_ty);
+ defer bin_op_local.free(self);
const result_ptr = try self.allocStack(self.air.typeOfIndex(inst));
- try self.store(result_ptr, bin_op, lhs_ty, 0);
+ try self.store(result_ptr, bin_op_local, lhs_ty, 0);
const offset = @intCast(u32, lhs_ty.abiSize(self.target));
try self.store(result_ptr, overflow_bit, Type.initTag(.u1), offset);
@@ -4496,12 +4588,10 @@ fn airMaxMin(self: *Self, inst: Air.Inst.Index, op: enum { max, min }) InnerErro
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- const cmp_result = try self.cmp(lhs, rhs, ty, if (op == .max) .gt else .lt);
-
// operands to select from
try self.lowerToStack(lhs);
try self.lowerToStack(rhs);
- try self.emitWValue(cmp_result);
+ _ = try self.cmp(lhs, rhs, ty, if (op == .max) .gt else .lt);
// based on the result from comparison, return operand 0 or 1.
try self.addTag(.select);
@@ -4527,21 +4617,21 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const rhs = try self.resolveInst(bin_op.rhs);
if (ty.floatBits(self.target) == 16) {
- const addend_ext = try self.fpext(addend, ty, Type.f32);
- const lhs_ext = try self.fpext(lhs, ty, Type.f32);
const rhs_ext = try self.fpext(rhs, ty, Type.f32);
+ const lhs_ext = try self.fpext(lhs, ty, Type.f32);
+ const addend_ext = try self.fpext(addend, ty, Type.f32);
// call to compiler-rt `fn fmaf(f32, f32, f32) f32`
- const result = try self.callIntrinsic(
+ var result = try self.callIntrinsic(
"fmaf",
&.{ Type.f32, Type.f32, Type.f32 },
Type.f32,
&.{ rhs_ext, lhs_ext, addend_ext },
);
- return try self.fptrunc(result, Type.f32, ty);
+ return try (try self.fptrunc(result, Type.f32, ty)).toLocal(self, ty);
}
const mul_result = try self.binOp(lhs, rhs, ty, .mul);
- return self.binOp(mul_result, addend, ty, .add);
+ return (try self.binOp(mul_result, addend, ty, .add)).toLocal(self, ty);
}
fn airClz(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -4570,17 +4660,16 @@ fn airClz(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
try self.addTag(.i32_wrap_i64);
},
128 => {
- const msb = try self.load(operand, Type.u64, 0);
- const lsb = try self.load(operand, Type.u64, 8);
- const neq = try self.cmp(lsb, .{ .imm64 = 0 }, Type.u64, .neq);
+ var lsb = try (try self.load(operand, Type.u64, 8)).toLocal(self, Type.u64);
+ defer lsb.free(self);
try self.emitWValue(lsb);
try self.addTag(.i64_clz);
- try self.emitWValue(msb);
+ _ = try self.load(operand, Type.u64, 0);
try self.addTag(.i64_clz);
try self.emitWValue(.{ .imm64 = 64 });
try self.addTag(.i64_add);
- try self.emitWValue(neq);
+ _ = try self.cmp(lsb, .{ .imm64 = 0 }, Type.u64, .neq);
try self.addTag(.select);
try self.addTag(.i32_wrap_i64);
},
@@ -4617,28 +4706,27 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
32 => {
if (wasm_bits != int_info.bits) {
const val: u32 = @as(u32, 1) << @intCast(u5, int_info.bits);
- const bin_op = try self.binOp(operand, .{ .imm32 = val }, ty, .@"or");
- try self.emitWValue(bin_op);
+ // leave value on the stack
+ _ = try self.binOp(operand, .{ .imm32 = val }, ty, .@"or");
} else try self.emitWValue(operand);
try self.addTag(.i32_ctz);
},
64 => {
if (wasm_bits != int_info.bits) {
const val: u64 = @as(u64, 1) << @intCast(u6, int_info.bits);
- const bin_op = try self.binOp(operand, .{ .imm64 = val }, ty, .@"or");
- try self.emitWValue(bin_op);
+ // leave value on the stack
+ _ = try self.binOp(operand, .{ .imm64 = val }, ty, .@"or");
} else try self.emitWValue(operand);
try self.addTag(.i64_ctz);
try self.addTag(.i32_wrap_i64);
},
128 => {
- const msb = try self.load(operand, Type.u64, 0);
- const lsb = try self.load(operand, Type.u64, 8);
- const neq = try self.cmp(msb, .{ .imm64 = 0 }, Type.u64, .neq);
+ var msb = try (try self.load(operand, Type.u64, 0)).toLocal(self, Type.u64);
+ defer msb.free(self);
try self.emitWValue(msb);
try self.addTag(.i64_ctz);
- try self.emitWValue(lsb);
+ _ = try self.load(operand, Type.u64, 8);
if (wasm_bits != int_info.bits) {
try self.addImm64(@as(u64, 1) << @intCast(u6, int_info.bits - 64));
try self.addTag(.i64_or);
@@ -4650,7 +4738,7 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
} else {
try self.addTag(.i64_add);
}
- try self.emitWValue(neq);
+ _ = try self.cmp(msb, .{ .imm64 = 0 }, Type.u64, .neq);
try self.addTag(.select);
try self.addTag(.i32_wrap_i64);
},
@@ -4776,7 +4864,8 @@ fn lowerTry(
if (isByRef(pl_ty, self.target)) {
return buildPointerOffset(self, err_union, pl_offset, .new);
}
- return self.load(err_union, pl_ty, pl_offset);
+ const payload = try self.load(err_union, pl_ty, pl_offset);
+ return payload.toLocal(self, pl_ty);
}
fn airByteSwap(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -4806,11 +4895,11 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const res = if (int_info.signedness == .signed) blk: {
break :blk try self.wrapOperand(shr_res, Type.u8);
} else shr_res;
- return self.binOp(lhs, res, ty, .@"or");
+ return (try self.binOp(lhs, res, ty, .@"or")).toLocal(self, ty);
},
24 => {
- const msb = try self.wrapOperand(operand, Type.u16);
- const lsb = try self.wrapBinOp(operand, .{ .imm32 = 16 }, Type.u8, .shr);
+ var msb = try (try self.wrapOperand(operand, Type.u16)).toLocal(self, Type.u16);
+ defer msb.free(self);
const shl_res = try self.binOp(msb, .{ .imm32 = 8 }, Type.u16, .shl);
const lhs = try self.binOp(shl_res, .{ .imm32 = 0xFF0000 }, Type.u16, .@"and");
@@ -4824,22 +4913,26 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const rhs_wrap = try self.wrapOperand(msb, Type.u8);
const rhs_result = try self.binOp(rhs_wrap, .{ .imm32 = 16 }, ty, .shl);
+ const lsb = try self.wrapBinOp(operand, .{ .imm32 = 16 }, Type.u8, .shr);
const tmp = try self.binOp(lhs_result, rhs_result, ty, .@"or");
- return self.binOp(tmp, lsb, ty, .@"or");
+ return (try self.binOp(tmp, lsb, ty, .@"or")).toLocal(self, ty);
},
32 => {
const shl_tmp = try self.binOp(operand, .{ .imm32 = 8 }, ty, .shl);
- const lhs = try self.binOp(shl_tmp, .{ .imm32 = 0xFF00FF00 }, ty, .@"and");
+ var lhs = try (try self.binOp(shl_tmp, .{ .imm32 = 0xFF00FF00 }, ty, .@"and")).toLocal(self, ty);
+ defer lhs.free(self);
const shr_tmp = try self.binOp(operand, .{ .imm32 = 8 }, ty, .shr);
- const rhs = try self.binOp(shr_tmp, .{ .imm32 = 0xFF00FF }, ty, .@"and");
- const tmp_or = try self.binOp(lhs, rhs, ty, .@"or");
+ var rhs = try (try self.binOp(shr_tmp, .{ .imm32 = 0xFF00FF }, ty, .@"and")).toLocal(self, ty);
+ defer rhs.free(self);
+ var tmp_or = try (try self.binOp(lhs, rhs, ty, .@"or")).toLocal(self, ty);
+ defer tmp_or.free(self);
const shl = try self.binOp(tmp_or, .{ .imm32 = 16 }, ty, .shl);
const shr = try self.binOp(tmp_or, .{ .imm32 = 16 }, ty, .shr);
const res = if (int_info.signedness == .signed) blk: {
break :blk try self.wrapOperand(shr, Type.u16);
} else shr;
- return self.binOp(shl, res, ty, .@"or");
+ return (try self.binOp(shl, res, ty, .@"or")).toLocal(self, ty);
},
else => return self.fail("TODO: @byteSwap for integers with bitsize {d}", .{int_info.bits}),
}
@@ -4856,7 +4949,7 @@ fn airDiv(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
if (ty.isSignedInt()) {
return self.divSigned(lhs, rhs, ty);
}
- return self.binOp(lhs, rhs, ty, .div);
+ return (try self.binOp(lhs, rhs, ty, .div)).toLocal(self, ty);
}
fn airDivFloor(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
@@ -4868,33 +4961,31 @@ fn airDivFloor(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const rhs = try self.resolveInst(bin_op.rhs);
if (ty.isUnsignedInt()) {
- return self.binOp(lhs, rhs, ty, .div);
+ return (try self.binOp(lhs, rhs, ty, .div)).toLocal(self, ty);
} else if (ty.isSignedInt()) {
const int_bits = ty.intInfo(self.target).bits;
const wasm_bits = toWasmBits(int_bits) orelse {
return self.fail("TODO: `@divFloor` for signed integers larger than '{d}' bits", .{int_bits});
};
const lhs_res = if (wasm_bits != int_bits) blk: {
- break :blk try self.signAbsValue(lhs, ty);
+ break :blk try (try self.signAbsValue(lhs, ty)).toLocal(self, ty);
} else lhs;
const rhs_res = if (wasm_bits != int_bits) blk: {
- break :blk try self.signAbsValue(rhs, ty);
+ break :blk try (try self.signAbsValue(rhs, ty)).toLocal(self, ty);
} else rhs;
- const div_result = try self.binOp(lhs_res, rhs_res, ty, .div);
- const rem_result = try self.binOp(lhs_res, rhs_res, ty, .rem);
-
const zero = switch (wasm_bits) {
32 => WValue{ .imm32 = 0 },
64 => WValue{ .imm64 = 0 },
else => unreachable,
};
- const lhs_less_than_zero = try self.cmp(lhs_res, zero, ty, .lt);
- const rhs_less_than_zero = try self.cmp(rhs_res, zero, ty, .lt);
- try self.emitWValue(div_result);
- try self.emitWValue(lhs_less_than_zero);
- try self.emitWValue(rhs_less_than_zero);
+ const div_result = try self.allocLocal(ty);
+ // leave on stack
+ _ = try self.binOp(lhs_res, rhs_res, ty, .div);
+ try self.addLabel(.local_tee, div_result.local);
+ _ = try self.cmp(lhs_res, zero, ty, .lt);
+ _ = try self.cmp(rhs_res, zero, ty, .lt);
switch (wasm_bits) {
32 => {
try self.addTag(.i32_xor);
@@ -4907,7 +4998,8 @@ fn airDivFloor(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
else => unreachable,
}
try self.emitWValue(div_result);
- try self.emitWValue(rem_result);
+ // leave value on the stack
+ _ = try self.binOp(lhs_res, rhs_res, ty, .rem);
try self.addTag(.select);
} else {
const float_bits = ty.floatBits(self.target);
@@ -4939,9 +5031,7 @@ fn airDivFloor(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
}
if (is_f16) {
- // we can re-use temporary local
- try self.addLabel(.local_set, lhs_operand.local);
- return self.fptrunc(lhs_operand, Type.f32, Type.f16);
+ _ = try self.fptrunc(.{ .stack = {} }, Type.f32, Type.f16);
}
}
@@ -4961,10 +5051,9 @@ fn divSigned(self: *Self, lhs: WValue, rhs: WValue, ty: Type) InnerError!WValue
}
if (wasm_bits != int_bits) {
- const lhs_abs = try self.signAbsValue(lhs, ty);
- const rhs_abs = try self.signAbsValue(rhs, ty);
- try self.emitWValue(lhs_abs);
- try self.emitWValue(rhs_abs);
+ // Leave both values on the stack
+ _ = try self.signAbsValue(lhs, ty);
+ _ = try self.signAbsValue(rhs, ty);
} else {
try self.emitWValue(lhs);
try self.emitWValue(rhs);
@@ -4976,6 +5065,8 @@ fn divSigned(self: *Self, lhs: WValue, rhs: WValue, ty: Type) InnerError!WValue
return result;
}
+/// Retrieves the absolute value of a signed integer
+/// NOTE: Leaves the result value on the stack.
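+/// Callers that reuse the result spill it into a temporary local first, e.g.:
+///   var abs = try (try self.signAbsValue(lhs, ty)).toLocal(self, ty);
+///   defer abs.free(self); // free the temporary so it can be re-used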
fn signAbsValue(self: *Self, operand: WValue, ty: Type) InnerError!WValue {
const int_bits = ty.intInfo(self.target).bits;
const wasm_bits = toWasmBits(int_bits) orelse {
@@ -5004,9 +5095,8 @@ fn signAbsValue(self: *Self, operand: WValue, ty: Type) InnerError!WValue {
},
else => unreachable,
}
- const result = try self.allocLocal(ty);
- try self.addLabel(.local_set, result.local);
- return result;
+
+ return WValue{ .stack = {} };
}
fn airCeilFloorTrunc(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!WValue {
@@ -5033,9 +5123,7 @@ fn airCeilFloorTrunc(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!WValu
try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
if (is_f16) {
- // re-use temporary to save locals
- try self.addLabel(.local_set, op_to_lower.local);
- return self.fptrunc(op_to_lower, Type.f32, Type.f16);
+ _ = try self.fptrunc(.{ .stack = {} }, Type.f32, Type.f16);
}
const result = try self.allocLocal(ty);
@@ -5064,7 +5152,8 @@ fn airSatBinOp(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!WValue {
}
const wasm_bits = toWasmBits(int_info.bits).?;
- const bin_result = try self.binOp(lhs, rhs, ty, op);
+ var bin_result = try (try self.binOp(lhs, rhs, ty, op)).toLocal(self, ty);
+ defer bin_result.free(self);
if (wasm_bits != int_info.bits and op == .add) {
const val: u64 = @intCast(u64, (@as(u65, 1) << @intCast(u7, int_info.bits)) - 1);
const imm_val = switch (wasm_bits) {
@@ -5073,19 +5162,17 @@ fn airSatBinOp(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!WValue {
else => unreachable,
};
- const cmp_result = try self.cmp(bin_result, imm_val, ty, .lt);
try self.emitWValue(bin_result);
try self.emitWValue(imm_val);
- try self.emitWValue(cmp_result);
+ _ = try self.cmp(bin_result, imm_val, ty, .lt);
} else {
- const cmp_result = try self.cmp(bin_result, lhs, ty, if (op == .add) .lt else .gt);
switch (wasm_bits) {
32 => try self.addImm32(if (op == .add) @as(i32, -1) else 0),
64 => try self.addImm64(if (op == .add) @bitCast(u64, @as(i64, -1)) else 0),
else => unreachable,
}
try self.emitWValue(bin_result);
- try self.emitWValue(cmp_result);
+ _ = try self.cmp(bin_result, lhs, ty, if (op == .add) .lt else .gt);
}
try self.addTag(.select);
@@ -5099,8 +5186,12 @@ fn signedSat(self: *Self, lhs_operand: WValue, rhs_operand: WValue, ty: Type, op
const wasm_bits = toWasmBits(int_info.bits).?;
const is_wasm_bits = wasm_bits == int_info.bits;
- const lhs = if (!is_wasm_bits) try self.signAbsValue(lhs_operand, ty) else lhs_operand;
- const rhs = if (!is_wasm_bits) try self.signAbsValue(rhs_operand, ty) else rhs_operand;
+ var lhs = if (!is_wasm_bits) lhs: {
+ break :lhs try (try self.signAbsValue(lhs_operand, ty)).toLocal(self, ty);
+ } else lhs_operand;
+ var rhs = if (!is_wasm_bits) rhs: {
+ break :rhs try (try self.signAbsValue(rhs_operand, ty)).toLocal(self, ty);
+ } else rhs_operand;
const max_val: u64 = @intCast(u64, (@as(u65, 1) << @intCast(u7, int_info.bits - 1)) - 1);
const min_val: i64 = (-@intCast(i64, @intCast(u63, max_val))) - 1;
@@ -5115,38 +5206,38 @@ fn signedSat(self: *Self, lhs_operand: WValue, rhs_operand: WValue, ty: Type, op
else => unreachable,
};
- const bin_result = try self.binOp(lhs, rhs, ty, op);
+ var bin_result = try (try self.binOp(lhs, rhs, ty, op)).toLocal(self, ty);
if (!is_wasm_bits) {
- const cmp_result_lt = try self.cmp(bin_result, max_wvalue, ty, .lt);
+ defer bin_result.free(self); // not returned in this branch
+ defer lhs.free(self); // uses temporary local for absvalue
+ defer rhs.free(self); // uses temporary local for absvalue
try self.emitWValue(bin_result);
try self.emitWValue(max_wvalue);
- try self.emitWValue(cmp_result_lt);
+ _ = try self.cmp(bin_result, max_wvalue, ty, .lt);
try self.addTag(.select);
try self.addLabel(.local_set, bin_result.local); // re-use local
- const cmp_result_gt = try self.cmp(bin_result, min_wvalue, ty, .gt);
try self.emitWValue(bin_result);
try self.emitWValue(min_wvalue);
- try self.emitWValue(cmp_result_gt);
+ _ = try self.cmp(bin_result, min_wvalue, ty, .gt);
try self.addTag(.select);
try self.addLabel(.local_set, bin_result.local); // re-use local
- return self.wrapOperand(bin_result, ty);
+ return (try self.wrapOperand(bin_result, ty)).toLocal(self, ty);
} else {
const zero = switch (wasm_bits) {
32 => WValue{ .imm32 = 0 },
64 => WValue{ .imm64 = 0 },
else => unreachable,
};
- const cmp_bin_result = try self.cmp(bin_result, lhs, ty, .lt);
- const cmp_zero_result = try self.cmp(rhs, zero, ty, if (op == .add) .lt else .gt);
- const xor = try self.binOp(cmp_zero_result, cmp_bin_result, Type.u32, .xor); // comparisons always return i32, so provide u32 as type to xor.
- const cmp_bin_zero_result = try self.cmp(bin_result, zero, ty, .lt);
try self.emitWValue(max_wvalue);
try self.emitWValue(min_wvalue);
- try self.emitWValue(cmp_bin_zero_result);
+ _ = try self.cmp(bin_result, zero, ty, .lt);
try self.addTag(.select);
try self.emitWValue(bin_result);
- try self.emitWValue(xor);
+ // leave on stack
+ const cmp_zero_result = try self.cmp(rhs, zero, ty, if (op == .add) .lt else .gt);
+ const cmp_bin_result = try self.cmp(bin_result, lhs, ty, .lt);
+ _ = try self.binOp(cmp_zero_result, cmp_bin_result, Type.u32, .xor); // comparisons always return i32, so provide u32 as type to xor.
try self.addTag(.select);
try self.addLabel(.local_set, bin_result.local); // re-use local
return bin_result;
@@ -5170,9 +5261,10 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const result = try self.allocLocal(ty);
if (wasm_bits == int_info.bits) {
- const shl = try self.binOp(lhs, rhs, ty, .shl);
- const shr = try self.binOp(shl, rhs, ty, .shr);
- const cmp_result = try self.cmp(lhs, shr, ty, .neq);
+ var shl = try (try self.binOp(lhs, rhs, ty, .shl)).toLocal(self, ty);
+ defer shl.free(self);
+ var shr = try (try self.binOp(shl, rhs, ty, .shr)).toLocal(self, ty);
+ defer shr.free(self);
switch (wasm_bits) {
32 => blk: {
@@ -5180,10 +5272,9 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
try self.addImm32(-1);
break :blk;
}
- const less_than_zero = try self.cmp(lhs, .{ .imm32 = 0 }, ty, .lt);
try self.addImm32(std.math.minInt(i32));
try self.addImm32(std.math.maxInt(i32));
- try self.emitWValue(less_than_zero);
+ _ = try self.cmp(lhs, .{ .imm32 = 0 }, ty, .lt);
try self.addTag(.select);
},
64 => blk: {
@@ -5191,16 +5282,15 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
try self.addImm64(@bitCast(u64, @as(i64, -1)));
break :blk;
}
- const less_than_zero = try self.cmp(lhs, .{ .imm64 = 0 }, ty, .lt);
try self.addImm64(@bitCast(u64, @as(i64, std.math.minInt(i64))));
try self.addImm64(@bitCast(u64, @as(i64, std.math.maxInt(i64))));
- try self.emitWValue(less_than_zero);
+ _ = try self.cmp(lhs, .{ .imm64 = 0 }, ty, .lt);
try self.addTag(.select);
},
else => unreachable,
}
try self.emitWValue(shl);
- try self.emitWValue(cmp_result);
+ _ = try self.cmp(lhs, shr, ty, .neq);
try self.addTag(.select);
try self.addLabel(.local_set, result.local);
return result;
@@ -5212,10 +5302,12 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
else => unreachable,
};
- const shl_res = try self.binOp(lhs, shift_value, ty, .shl);
- const shl = try self.binOp(shl_res, rhs, ty, .shl);
- const shr = try self.binOp(shl, rhs, ty, .shr);
- const cmp_result = try self.cmp(shl_res, shr, ty, .neq);
+ var shl_res = try (try self.binOp(lhs, shift_value, ty, .shl)).toLocal(self, ty);
+ defer shl_res.free(self);
+ var shl = try (try self.binOp(shl_res, rhs, ty, .shl)).toLocal(self, ty);
+ defer shl.free(self);
+ var shr = try (try self.binOp(shl, rhs, ty, .shr)).toLocal(self, ty);
+ defer shr.free(self);
switch (wasm_bits) {
32 => blk: {
@@ -5224,10 +5316,9 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
break :blk;
}
- const less_than_zero = try self.cmp(shl_res, .{ .imm32 = 0 }, ty, .lt);
try self.addImm32(std.math.minInt(i32));
try self.addImm32(std.math.maxInt(i32));
- try self.emitWValue(less_than_zero);
+ _ = try self.cmp(shl_res, .{ .imm32 = 0 }, ty, .lt);
try self.addTag(.select);
},
64 => blk: {
@@ -5236,29 +5327,31 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
break :blk;
}
- const less_than_zero = try self.cmp(shl_res, .{ .imm64 = 0 }, ty, .lt);
try self.addImm64(@bitCast(u64, @as(i64, std.math.minInt(i64))));
try self.addImm64(@bitCast(u64, @as(i64, std.math.maxInt(i64))));
- try self.emitWValue(less_than_zero);
+ _ = try self.cmp(shl_res, .{ .imm64 = 0 }, ty, .lt);
try self.addTag(.select);
},
else => unreachable,
}
try self.emitWValue(shl);
- try self.emitWValue(cmp_result);
+ _ = try self.cmp(shl_res, shr, ty, .neq);
try self.addTag(.select);
try self.addLabel(.local_set, result.local);
- const shift_result = try self.binOp(result, shift_value, ty, .shr);
+ var shift_result = try self.binOp(result, shift_value, ty, .shr);
if (is_signed) {
- return self.wrapOperand(shift_result, ty);
+ shift_result = try self.wrapOperand(shift_result, ty);
}
- return shift_result;
+ return shift_result.toLocal(self, ty);
}
}
/// Calls a compiler-rt intrinsic by creating an undefined symbol,
/// then lowering the arguments and calling the symbol as a function call.
/// This function call assumes the C-ABI.
+/// Asserts arguments are not stack values when the return value is
+/// passed as the first parameter.
+/// May leave the return value on the stack.
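+/// Example from `airMulAdd` above, where the f32 result comes back on the
+/// stack and is truncated before being spilled into a local:
+///   var result = try self.callIntrinsic("fmaf", &.{ Type.f32, Type.f32, Type.f32 }, Type.f32, &.{ rhs_ext, lhs_ext, addend_ext });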
fn callIntrinsic(
self: *Self,
name: []const u8,
@@ -5288,6 +5381,7 @@ fn callIntrinsic(
// Lower all arguments to the stack before we call our function
for (args) |arg, arg_i| {
+ assert(!(want_sret_param and arg == .stack));
assert(param_types[arg_i].hasRuntimeBitsIgnoreComptime());
try self.lowerArg(.C, param_types[arg_i], arg);
}
@@ -5303,8 +5397,6 @@ fn callIntrinsic(
} else if (want_sret_param) {
return sret;
} else {
- const result_local = try self.allocLocal(return_type);
- try self.addLabel(.local_set, result_local.local);
- return result_local;
+ return WValue{ .stack = {} };
}
}
diff --git a/src/arch/wasm/Emit.zig b/src/arch/wasm/Emit.zig
index 0081dc4d5d..0f3ee9a2a2 100644
--- a/src/arch/wasm/Emit.zig
+++ b/src/arch/wasm/Emit.zig
@@ -343,7 +343,7 @@ fn emitMemArg(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) !void {
try emit.code.append(@enumToInt(tag));
// wasm encodes alignment as power of 2, rather than natural alignment
- const encoded_alignment = @ctz(u32, mem_arg.alignment);
+ const encoded_alignment = @ctz(mem_arg.alignment);
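+ // e.g. a natural alignment of 4 bytes is encoded as @ctz(4) == 2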
try leb128.writeULEB128(emit.code.writer(), encoded_alignment);
try leb128.writeULEB128(emit.code.writer(), mem_arg.offset);
}
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 09721c661f..106d2feec0 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -775,6 +775,9 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.float_to_int_optimized,
=> return self.fail("TODO implement optimized float mode", .{}),
+ .is_named_enum_value => return self.fail("TODO implement is_named_enum_value", .{}),
+ .error_set_has_value => return self.fail("TODO implement error_set_has_value", .{}),
+
.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
// zig fmt: on
@@ -3789,7 +3792,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
const ty = self.air.typeOfIndex(inst);
const mcv = self.args[arg_index];
- const name = self.mod_fn.getParamName(arg_index);
+ const name = self.mod_fn.getParamName(self.bin_file.options.module.?, arg_index);
const name_with_null = name.ptr[0 .. name.len + 1];
if (self.liveness.isUnused(inst))
@@ -4368,6 +4371,7 @@ fn genVarDbgInfo(
.dwarf => |dw| {
const dbg_info = &dw.dbg_info;
try dbg_info.append(@enumToInt(link.File.Dwarf.AbbrevKind.variable));
+ const endian = self.target.cpu.arch.endian();
switch (mcv) {
.register => |reg| {
@@ -4388,7 +4392,6 @@ fn genVarDbgInfo(
dbg_info.items[fixup] += @intCast(u8, dbg_info.items.len - fixup - 2);
},
.memory, .got_load, .direct_load => {
- const endian = self.target.cpu.arch.endian();
const ptr_width = @intCast(u8, @divExact(self.target.cpu.arch.ptrBitWidth(), 8));
const is_ptr = switch (tag) {
.dbg_var_ptr => true,
@@ -4423,7 +4426,53 @@ fn genVarDbgInfo(
else => {},
}
},
+ .immediate => |x| {
+ const signedness: std.builtin.Signedness = blk: {
+ if (ty.zigTypeTag() != .Int) break :blk .unsigned;
+ break :blk ty.intInfo(self.target.*).signedness;
+ };
+ try dbg_info.ensureUnusedCapacity(2);
+ const fixup = dbg_info.items.len;
+ dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc
+ 1,
+ switch (signedness) {
+ .signed => DW.OP.consts,
+ .unsigned => DW.OP.constu,
+ },
+ });
+ switch (signedness) {
+ .signed => try leb128.writeILEB128(dbg_info.writer(), @bitCast(i64, x)),
+ .unsigned => try leb128.writeULEB128(dbg_info.writer(), x),
+ }
+ try dbg_info.append(DW.OP.stack_value);
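+ // patch the exprloc length (the `1` written at `fixup`) now that the
+ // size of the LEB-encoded constant is known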
+ dbg_info.items[fixup] += @intCast(u8, dbg_info.items.len - fixup - 2);
+ },
+ .undef => {
+ // DW.AT.location, DW.FORM.exprloc
+ // uleb128(exprloc_len)
+ // DW.OP.implicit_value uleb128(len_of_bytes) bytes
+ const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+ var implicit_value_len = std.ArrayList(u8).init(self.gpa);
+ defer implicit_value_len.deinit();
+ try leb128.writeULEB128(implicit_value_len.writer(), abi_size);
+ const total_exprloc_len = 1 + implicit_value_len.items.len + abi_size;
+ try leb128.writeULEB128(dbg_info.writer(), total_exprloc_len);
+ try dbg_info.ensureUnusedCapacity(total_exprloc_len);
+ dbg_info.appendAssumeCapacity(DW.OP.implicit_value);
+ dbg_info.appendSliceAssumeCapacity(implicit_value_len.items);
+ dbg_info.appendNTimesAssumeCapacity(0xaa, abi_size);
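+ // 0xaa matches the byte pattern Zig uses when filling `undefined` memory in safe builds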
+ },
+ .none => {
+ try dbg_info.ensureUnusedCapacity(3);
+ dbg_info.appendSliceAssumeCapacity(&[3]u8{ // DW.AT.location, DW.FORM.exprloc
+ 2, DW.OP.lit0, DW.OP.stack_value,
+ });
+ },
else => {
+ try dbg_info.ensureUnusedCapacity(2);
+ dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc
+ 1, DW.OP.nop,
+ });
log.debug("TODO generate debug info for {}", .{mcv});
},
}
@@ -6475,13 +6524,13 @@ fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void {
const extra = self.air.extraData(Air.Block, ty_pl.payload);
_ = ty_pl;
_ = extra;
- return self.fail("TODO implement airCmpxchg for {}", .{self.target.cpu.arch});
+ return self.fail("TODO implement x86 airCmpxchg", .{});
// return self.finishAir(inst, result, .{ extra.ptr, extra.expected_value, extra.new_value });
}
fn airAtomicRmw(self: *Self, inst: Air.Inst.Index) !void {
_ = inst;
- return self.fail("TODO implement airCmpxchg for {}", .{self.target.cpu.arch});
+ return self.fail("TODO implement x86 airAtomicRmw", .{});
}
fn airAtomicLoad(self: *Self, inst: Air.Inst.Index) !void {
diff --git a/src/autodoc/render_source.zig b/src/autodoc/render_source.zig
new file mode 100644
index 0000000000..cafed8d526
--- /dev/null
+++ b/src/autodoc/render_source.zig
@@ -0,0 +1,424 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const io = std.io;
+const fs = std.fs;
+const process = std.process;
+const ChildProcess = std.ChildProcess;
+const Progress = std.Progress;
+const print = std.debug.print;
+const mem = std.mem;
+const testing = std.testing;
+const Allocator = std.mem.Allocator;
+const Module = @import("../Module.zig");
+
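+/// Renders `src` as a standalone, syntax-highlighted HTML page for the
+/// autodoc source viewer.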
+pub fn genHtml(
+ allocator: Allocator,
+ src: *Module.File,
+ out: anytype,
+) !void {
+ try out.writeAll(
+ \\<!doctype html>
+ \\<html>
+ \\<head>
+ \\ <meta charset="utf-8">
+ );
+ try out.print(" <title>{s} - source view</title>\n", .{src.sub_file_path});
+ try out.writeAll(
+ \\</head>
+ \\<body>
+ );
+
+ const source = try src.getSource(allocator);
+ try tokenizeAndPrintRaw(allocator, out, source.bytes);
+ try out.writeAll(
+ \\</body>
+ \\</html>
+ );
+}
+
+const start_line = "<span class=\"line\" id=\"L{d}\">";
+const end_line = "</span>\n";
+
+var line_counter: usize = 1;
+
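+/// Tokenizes `raw_src` with the self-hosted tokenizer and writes it out as
+/// syntax-highlighted HTML, wrapping each source line in a span anchored by
+/// its line number.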
+pub fn tokenizeAndPrintRaw(
+ allocator: Allocator,
+ out: anytype,
+ raw_src: [:0]const u8,
+) !void {
+ const src = try allocator.dupeZ(u8, raw_src);
+ defer allocator.free(src);
+
+ line_counter = 1;
+
+ try out.print("" ++ start_line, .{line_counter});
+ var tokenizer = std.zig.Tokenizer.init(src);
+ var index: usize = 0;
+ var next_tok_is_fn = false;
+ while (true) {
+ const prev_tok_was_fn = next_tok_is_fn;
+ next_tok_is_fn = false;
+
+ const token = tokenizer.next();
+ if (mem.indexOf(u8, src[index..token.loc.start], "//")) |comment_start_off| {
+ // render one comment
+ const comment_start = index + comment_start_off;
+ const comment_end_off = mem.indexOf(u8, src[comment_start..token.loc.start], "\n");
+ const comment_end = if (comment_end_off) |o| comment_start + o else token.loc.start;
+
+ try writeEscapedLines(out, src[index..comment_start]);
+ try out.writeAll("");
+ try writeEscaped(out, src[comment_start..comment_end]);
+ try out.writeAll("\n");
+ index = comment_end;
+ tokenizer.index = index;
+ continue;
+ }
+
+ try writeEscapedLines(out, src[index..token.loc.start]);
+ switch (token.tag) {
+ .eof => break,
+
+ .keyword_addrspace,
+ .keyword_align,
+ .keyword_and,
+ .keyword_asm,
+ .keyword_async,
+ .keyword_await,
+ .keyword_break,
+ .keyword_catch,
+ .keyword_comptime,
+ .keyword_const,
+ .keyword_continue,
+ .keyword_defer,
+ .keyword_else,
+ .keyword_enum,
+ .keyword_errdefer,
+ .keyword_error,
+ .keyword_export,
+ .keyword_extern,
+ .keyword_for,
+ .keyword_if,
+ .keyword_inline,
+ .keyword_noalias,
+ .keyword_noinline,
+ .keyword_nosuspend,
+ .keyword_opaque,
+ .keyword_or,
+ .keyword_orelse,
+ .keyword_packed,
+ .keyword_anyframe,
+ .keyword_pub,
+ .keyword_resume,
+ .keyword_return,
+ .keyword_linksection,
+ .keyword_callconv,
+ .keyword_struct,
+ .keyword_suspend,
+ .keyword_switch,
+ .keyword_test,
+ .keyword_threadlocal,
+ .keyword_try,
+ .keyword_union,
+ .keyword_unreachable,
+ .keyword_usingnamespace,
+ .keyword_var,
+ .keyword_volatile,
+ .keyword_allowzero,
+ .keyword_while,
+ .keyword_anytype,
+ => {
+ try out.writeAll("");
+ try writeEscaped(out, src[token.loc.start..token.loc.end]);
+ try out.writeAll("");
+ },
+
+ .keyword_fn => {
+ try out.writeAll("");
+ try writeEscaped(out, src[token.loc.start..token.loc.end]);
+ try out.writeAll("");
+ next_tok_is_fn = true;
+ },
+
+ .string_literal,
+ .char_literal,
+ => {
+ try out.writeAll("");
+ try writeEscaped(out, src[token.loc.start..token.loc.end]);
+ try out.writeAll("");
+ },
+
+ .multiline_string_literal_line => {
+ if (src[token.loc.end - 1] == '\n') {
+ try out.writeAll("");
+ try writeEscaped(out, src[token.loc.start .. token.loc.end - 1]);
+ line_counter += 1;
+ try out.print("" ++ end_line ++ "\n" ++ start_line, .{line_counter});
+ } else {
+ try out.writeAll("");
+ try writeEscaped(out, src[token.loc.start..token.loc.end]);
+ try out.writeAll("");
+ }
+ },
+
+ .builtin => {
+ try out.writeAll("");
+ try writeEscaped(out, src[token.loc.start..token.loc.end]);
+ try out.writeAll("");
+ },
+
+ .doc_comment,
+ .container_doc_comment,
+ => {
+ try out.writeAll("");
+ try writeEscaped(out, src[token.loc.start..token.loc.end]);
+ try out.writeAll("");
+ },
+
+ .identifier => {
+ const tok_bytes = src[token.loc.start..token.loc.end];
+ if (mem.eql(u8, tok_bytes, "undefined") or
+ mem.eql(u8, tok_bytes, "null") or
+ mem.eql(u8, tok_bytes, "true") or
+ mem.eql(u8, tok_bytes, "false"))
+ {
+ try out.writeAll("");
+ try writeEscaped(out, tok_bytes);
+ try out.writeAll("");
+ } else if (prev_tok_was_fn) {
+ try out.writeAll("");
+ try writeEscaped(out, tok_bytes);
+ try out.writeAll("");
+ } else {
+ const is_int = blk: {
+ if (src[token.loc.start] != 'i' and src[token.loc.start] != 'u')
+ break :blk false;
+ var i = token.loc.start + 1;
+ if (i == token.loc.end)
+ break :blk false;
+ while (i != token.loc.end) : (i += 1) {
+ if (src[i] < '0' or src[i] > '9')
+ break :blk false;
+ }
+ break :blk true;
+ };
+ if (is_int or isType(tok_bytes)) {
+ try out.writeAll("");
+ try writeEscaped(out, tok_bytes);
+ try out.writeAll("");
+ } else {
+ try writeEscaped(out, tok_bytes);
+ }
+ }
+ },
+
+ .integer_literal,
+ .float_literal,
+ => {
+ try out.writeAll("");
+ try writeEscaped(out, src[token.loc.start..token.loc.end]);
+ try out.writeAll("");
+ },
+
+ .bang,
+ .pipe,
+ .pipe_pipe,
+ .pipe_equal,
+ .equal,
+ .equal_equal,
+ .equal_angle_bracket_right,
+ .bang_equal,
+ .l_paren,
+ .r_paren,
+ .semicolon,
+ .percent,
+ .percent_equal,
+ .l_brace,
+ .r_brace,
+ .l_bracket,
+ .r_bracket,
+ .period,
+ .period_asterisk,
+ .ellipsis2,
+ .ellipsis3,
+ .caret,
+ .caret_equal,
+ .plus,
+ .plus_plus,
+ .plus_equal,
+ .plus_percent,
+ .plus_percent_equal,
+ .plus_pipe,
+ .plus_pipe_equal,
+ .minus,
+ .minus_equal,
+ .minus_percent,
+ .minus_percent_equal,
+ .minus_pipe,
+ .minus_pipe_equal,
+ .asterisk,
+ .asterisk_equal,
+ .asterisk_asterisk,
+ .asterisk_percent,
+ .asterisk_percent_equal,
+ .asterisk_pipe,
+ .asterisk_pipe_equal,
+ .arrow,
+ .colon,
+ .slash,
+ .slash_equal,
+ .comma,
+ .ampersand,
+ .ampersand_equal,
+ .question_mark,
+ .angle_bracket_left,
+ .angle_bracket_left_equal,
+ .angle_bracket_angle_bracket_left,
+ .angle_bracket_angle_bracket_left_equal,
+ .angle_bracket_angle_bracket_left_pipe,
+ .angle_bracket_angle_bracket_left_pipe_equal,
+ .angle_bracket_right,
+ .angle_bracket_right_equal,
+ .angle_bracket_angle_bracket_right,
+ .angle_bracket_angle_bracket_right_equal,
+ .tilde,
+ => try writeEscaped(out, src[token.loc.start..token.loc.end]),
+
+ .invalid, .invalid_periodasterisks => return error.ParseError,
+ }
+ index = token.loc.end;
+ }
+ try out.writeAll(end_line ++ "
");
+}
+
+fn writeEscapedLines(out: anytype, text: []const u8) !void {
+ for (text) |char| {
+ if (char == '\n') {
+ try out.writeAll(end_line);
+ line_counter += 1;
+ try out.print(start_line, .{line_counter});
+ } else {
+ try writeEscaped(out, &[_]u8{char});
+ }
+ }
+}
+
+fn writeEscaped(out: anytype, input: []const u8) !void {
+ for (input) |c| {
+ try switch (c) {
+ '&' => out.writeAll("&"),
+ '<' => out.writeAll("<"),
+ '>' => out.writeAll(">"),
+ '"' => out.writeAll("""),
+ else => out.writeByte(c),
+ };
+ }
+}
+
+const builtin_types = [_][]const u8{
+ "f16", "f32", "f64", "f128", "c_longdouble", "c_short",
+ "c_ushort", "c_int", "c_uint", "c_long", "c_ulong", "c_longlong",
+ "c_ulonglong", "c_char", "anyopaque", "void", "bool", "isize",
+ "usize", "noreturn", "type", "anyerror", "comptime_int", "comptime_float",
+};
+
+fn isType(name: []const u8) bool {
+ for (builtin_types) |t| {
+ if (mem.eql(u8, t, name))
+ return true;
+ }
+ return false;
+}
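
For a quick sanity check of the helpers above, a test sketch (assumes `writeEscaped` and `isType` are in scope; this file already imports `std` and `mem`):

    test "writeEscaped and isType" {
        var buf: [64]u8 = undefined;
        var fbs = std.io.fixedBufferStream(&buf);
        try writeEscaped(fbs.writer(), "a < b & \"c\"");
        try std.testing.expectEqualStrings("a &lt; b &amp; &quot;c&quot;", fbs.getWritten());
        // "usize" comes from the builtin_types table; "u42" is instead caught
        // by the integer scan in the .identifier branch, so isType is false.
        try std.testing.expect(isType("usize"));
        try std.testing.expect(!isType("u42"));
    }
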
diff --git a/src/clang_options_data.zig b/src/clang_options_data.zig
index 2699a9bf3b..edabac2da6 100644
--- a/src/clang_options_data.zig
+++ b/src/clang_options_data.zig
@@ -33,7 +33,14 @@ flagpd1("H"),
.psl = false,
},
flagpd1("I-"),
-flagpd1("M"),
+.{
+ .name = "M",
+ .syntax = .flag,
+ .zig_equivalent = .dep_file_to_stdout,
+ .pd1 = true,
+ .pd2 = false,
+ .psl = false,
+},
.{
.name = "MD",
.syntax = .flag,
@@ -53,7 +60,7 @@ flagpd1("M"),
.{
.name = "MM",
.syntax = .flag,
- .zig_equivalent = .dep_file_mm,
+ .zig_equivalent = .dep_file_to_stdout,
.pd1 = true,
.pd2 = false,
.psl = false,
@@ -2033,7 +2040,7 @@ flagpsl("MT"),
.{
.name = "user-dependencies",
.syntax = .flag,
- .zig_equivalent = .dep_file_mm,
+ .zig_equivalent = .dep_file_to_stdout,
.pd1 = false,
.pd2 = true,
.psl = false,
@@ -3390,7 +3397,14 @@ flagpd1("fno-stack-arrays"),
.psl = false,
},
flagpd1("fno-stack-clash-protection"),
-flagpd1("fno-stack-protector"),
+.{
+ .name = "fno-stack-protector",
+ .syntax = .flag,
+ .zig_equivalent = .no_stack_protector,
+ .pd1 = true,
+ .pd2 = false,
+ .psl = false,
+},
flagpd1("fno-stack-size-section"),
flagpd1("fno-standalone-debug"),
flagpd1("fno-strength-reduce"),
@@ -3689,9 +3703,30 @@ flagpd1("fstack-arrays"),
.psl = false,
},
flagpd1("fstack-clash-protection"),
-flagpd1("fstack-protector"),
-flagpd1("fstack-protector-all"),
-flagpd1("fstack-protector-strong"),
+.{
+ .name = "fstack-protector",
+ .syntax = .flag,
+ .zig_equivalent = .stack_protector,
+ .pd1 = true,
+ .pd2 = false,
+ .psl = false,
+},
+.{
+ .name = "fstack-protector-all",
+ .syntax = .flag,
+ .zig_equivalent = .stack_protector,
+ .pd1 = true,
+ .pd2 = false,
+ .psl = false,
+},
+.{
+ .name = "fstack-protector-strong",
+ .syntax = .flag,
+ .zig_equivalent = .stack_protector,
+ .pd1 = true,
+ .pd2 = false,
+ .psl = false,
+},
flagpd1("fstack-size-section"),
flagpd1("fstack-usage"),
flagpd1("fstandalone-debug"),
@@ -4978,7 +5013,14 @@ flagpd1("single_module"),
},
sepd1("split-dwarf-file"),
sepd1("split-dwarf-output"),
-sepd1("stack-protector"),
+.{
+ .name = "stack-protector",
+ .syntax = .separate,
+ .zig_equivalent = .stack_protector,
+ .pd1 = true,
+ .pd2 = false,
+ .psl = false,
+},
sepd1("stack-protector-buffer-size"),
sepd1("stack-usage-file"),
.{
diff --git a/src/codegen.zig b/src/codegen.zig
index 025decdb4b..f5340458a5 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -607,7 +607,7 @@ pub fn generateSymbol(
const union_ty = typed_value.ty.cast(Type.Payload.Union).?.data;
const mod = bin_file.options.module.?;
- const field_index = union_ty.tag_ty.enumTagFieldIndex(union_obj.tag, mod).?;
+ const field_index = typed_value.ty.unionTagFieldIndex(union_obj.tag, mod).?;
assert(union_ty.haveFieldTypes());
const field_ty = union_ty.fields.values()[field_index].ty;
if (!field_ty.hasRuntimeBits()) {
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 280b7604bf..4a09c09cc9 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -835,7 +835,6 @@ pub const DeclGen = struct {
},
.Union => {
const union_obj = val.castTag(.@"union").?.data;
- const union_ty = ty.cast(Type.Payload.Union).?.data;
const layout = ty.unionGetLayout(target);
try writer.writeAll("(");
@@ -851,7 +850,7 @@ pub const DeclGen = struct {
try writer.writeAll(".payload = {");
}
- const index = union_ty.tag_ty.enumTagFieldIndex(union_obj.tag, dg.module).?;
+ const index = ty.unionTagFieldIndex(union_obj.tag, dg.module).?;
const field_ty = ty.unionFields().values()[index].ty;
const field_name = ty.unionFields().keys()[index];
if (field_ty.hasRuntimeBits()) {
@@ -1952,6 +1951,9 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
.reduce_optimized,
.float_to_int_optimized,
=> return f.fail("TODO implement optimized float mode", .{}),
+
+ .is_named_enum_value => return f.fail("TODO: C backend: implement is_named_enum_value", .{}),
+ .error_set_has_value => return f.fail("TODO: C backend: implement error_set_has_value", .{}),
// zig fmt: on
};
switch (result_value) {
@@ -3250,7 +3252,7 @@ fn airIsNull(
const ty = f.air.typeOf(un_op);
var opt_buf: Type.Payload.ElemType = undefined;
- const payload_ty = if (ty.zigTypeTag() == .Pointer)
+ const payload_ty = if (deref_suffix[0] != 0)
ty.childType().optionalChild(&opt_buf)
else
ty.optionalChild(&opt_buf);
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index a286f8ad01..e6bebe521e 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -222,6 +222,8 @@ pub const Object = struct {
/// * it works for functions not all globals.
/// Therefore, this table keeps track of the mapping.
decl_map: std.AutoHashMapUnmanaged(Module.Decl.Index, *const llvm.Value),
+ /// Serves the same purpose as `decl_map`, but is only used for the `is_named_enum_value` instruction.
+ named_enum_map: std.AutoHashMapUnmanaged(Module.Decl.Index, *const llvm.Value),
/// Maps Zig types to LLVM types. The table memory itself is backed by the GPA of
/// the compiler, but the Type/Value memory here is backed by `type_map_arena`.
/// TODO we need to remove entries from this map in response to incremental compilation
@@ -292,7 +294,7 @@ pub const Object = struct {
var di_compile_unit: ?*llvm.DICompileUnit = null;
if (!options.strip) {
- switch (options.object_format) {
+ switch (options.target.ofmt) {
.coff => llvm_module.addModuleCodeViewFlag(),
else => llvm_module.addModuleDebugInfoFlag(),
}
@@ -398,6 +400,7 @@ pub const Object = struct {
.target_data = target_data,
.target = options.target,
.decl_map = .{},
+ .named_enum_map = .{},
.type_map = .{},
.type_map_arena = std.heap.ArenaAllocator.init(gpa),
.di_type_map = .{},
@@ -417,6 +420,7 @@ pub const Object = struct {
self.llvm_module.dispose();
self.context.dispose();
self.decl_map.deinit(gpa);
+ self.named_enum_map.deinit(gpa);
self.type_map.deinit(gpa);
self.type_map_arena.deinit();
self.extern_collisions.deinit(gpa);
@@ -728,9 +732,14 @@ pub const Object = struct {
DeclGen.removeFnAttr(llvm_func, "noinline");
}
- // TODO: port these over from stage1
- // addLLVMFnAttr(llvm_fn, "sspstrong");
- // addLLVMFnAttrStr(llvm_fn, "stack-protector-buffer-size", "4");
+ // TODO: disable this if safety is off for the function scope
+ const ssp_buf_size = module.comp.bin_file.options.stack_protector;
+ if (ssp_buf_size != 0) {
+ var buf: [12]u8 = undefined;
+ const arg = std.fmt.bufPrintZ(&buf, "{d}", .{ssp_buf_size}) catch unreachable;
+ dg.addFnAttr(llvm_func, "sspstrong");
+ dg.addFnAttrString(llvm_func, "stack-protector-buffer-size", arg);
+ }
// TODO: disable this if safety is off for the function scope
if (module.comp.bin_file.options.stack_check) {
@@ -739,6 +748,10 @@ pub const Object = struct {
dg.addFnAttrString(llvm_func, "no-stack-arg-probe", "");
}
+ if (decl.@"linksection") |section| {
+ llvm_func.setSection(section);
+ }
+
// Remove all the basic blocks of a function in order to start over, generating
// LLVM IR from an empty function body.
while (llvm_func.getFirstBasicBlock()) |bb| {
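
The stack-protector hunk above formats the buffer size with `std.fmt.bufPrintZ`, which yields the null-terminated string LLVM's string-attribute API expects; a self-contained sketch of that pattern (values hypothetical):

    const std = @import("std");

    test "null-terminated attribute value via bufPrintZ" {
        const ssp_buf_size: u32 = 4; // hypothetical buffer size
        // 12 bytes: up to 10 digits for a u32, plus room for the sentinel.
        var buf: [12]u8 = undefined;
        const arg = std.fmt.bufPrintZ(&buf, "{d}", .{ssp_buf_size}) catch unreachable;
        try std.testing.expectEqualStrings("4", arg);
        try std.testing.expectEqual(@as(u8, 0), arg.ptr[arg.len]); // sentinel present
    }
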
@@ -935,6 +948,40 @@ pub const Object = struct {
};
try args.append(loaded);
},
+ .multiple_llvm_float => {
+ const llvm_floats = it.llvm_types_buffer[0..it.llvm_types_len];
+ const param_ty = fn_info.param_types[it.zig_index - 1];
+ const param_llvm_ty = try dg.lowerType(param_ty);
+ const param_alignment = param_ty.abiAlignment(target);
+ const arg_ptr = buildAllocaInner(builder, llvm_func, false, param_llvm_ty);
+ arg_ptr.setAlignment(param_alignment);
+ var field_types_buf: [8]*const llvm.Type = undefined;
+ const field_types = field_types_buf[0..llvm_floats.len];
+ for (llvm_floats) |float_bits, i| {
+ switch (float_bits) {
+ 64 => field_types[i] = dg.context.doubleType(),
+ 80 => field_types[i] = dg.context.x86FP80Type(),
+ else => {},
+ }
+ }
+ const ints_llvm_ty = dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False);
+ const casted_ptr = builder.buildBitCast(arg_ptr, ints_llvm_ty.pointerType(0), "");
+ for (llvm_floats) |_, i_usize| {
+ const i = @intCast(c_uint, i_usize);
+ const param = llvm_func.getParam(i);
+ const field_ptr = builder.buildStructGEP(casted_ptr, i, "");
+ const store_inst = builder.buildStore(param, field_ptr);
+ store_inst.setAlignment(target.cpu.arch.ptrBitWidth() / 8);
+ }
+
+ const is_by_ref = isByRef(param_ty);
+ const loaded = if (is_by_ref) arg_ptr else l: {
+ const load_inst = builder.buildLoad(arg_ptr, "");
+ load_inst.setAlignment(param_alignment);
+ break :l load_inst;
+ };
+ try args.append(loaded);
+ },
.as_u16 => {
const param = llvm_func.getParam(llvm_arg_i);
llvm_arg_i += 1;
@@ -1078,6 +1125,7 @@ pub const Object = struct {
}
llvm_global.setUnnamedAddr(.False);
llvm_global.setLinkage(.External);
+ if (module.wantDllExports()) llvm_global.setDLLStorageClass(.Default);
if (self.di_map.get(decl)) |di_node| {
if (try decl.isFunction()) {
const di_func = @ptrCast(*llvm.DISubprogram, di_node);
@@ -1103,6 +1151,7 @@ pub const Object = struct {
const exp_name = exports[0].options.name;
llvm_global.setValueName2(exp_name.ptr, exp_name.len);
llvm_global.setUnnamedAddr(.False);
+ if (module.wantDllExports()) llvm_global.setDLLStorageClass(.DLLExport);
if (self.di_map.get(decl)) |di_node| {
if (try decl.isFunction()) {
const di_func = @ptrCast(*llvm.DISubprogram, di_node);
@@ -1125,6 +1174,11 @@ pub const Object = struct {
.hidden => llvm_global.setVisibility(.Hidden),
.protected => llvm_global.setVisibility(.Protected),
}
+ if (exports[0].options.section) |section| {
+ const section_z = try module.gpa.dupeZ(u8, section);
+ defer module.gpa.free(section_z);
+ llvm_global.setSection(section_z);
+ }
if (decl.val.castTag(.variable)) |variable| {
if (variable.data.is_threadlocal) {
llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel);
@@ -1157,6 +1211,7 @@ pub const Object = struct {
defer module.gpa.free(fqn);
llvm_global.setValueName2(fqn.ptr, fqn.len);
llvm_global.setLinkage(.Internal);
+ if (module.wantDllExports()) llvm_global.setDLLStorageClass(.Default);
llvm_global.setUnnamedAddr(.True);
if (decl.val.castTag(.variable)) |variable| {
const single_threaded = module.comp.bin_file.options.single_threaded;
@@ -1701,8 +1756,7 @@ pub const Object = struct {
if (ty.castTag(.@"struct")) |payload| {
const struct_obj = payload.data;
if (struct_obj.layout == .Packed) {
- var buf: Type.Payload.Bits = undefined;
- const info = struct_obj.packedIntegerType(target, &buf).intInfo(target);
+ const info = struct_obj.backing_int_ty.intInfo(target);
const dwarf_encoding: c_uint = switch (info.signedness) {
.signed => DW.ATE.signed,
.unsigned => DW.ATE.unsigned,
@@ -1817,6 +1871,7 @@ pub const Object = struct {
}
const fields = ty.structFields();
+ const layout = ty.containerLayout();
var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{};
defer di_fields.deinit(gpa);
@@ -1827,10 +1882,10 @@ pub const Object = struct {
var offset: u64 = 0;
for (fields.values()) |field, i| {
- if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (field.is_comptime or !field.ty.hasRuntimeBits()) continue;
const field_size = field.ty.abiSize(target);
- const field_align = field.normalAlignment(target);
+ const field_align = field.alignment(target, layout);
const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align);
offset = field_offset + field_size;
@@ -2202,6 +2257,7 @@ pub const DeclGen = struct {
const target = dg.module.getTarget();
var global = try dg.resolveGlobalDecl(decl_index);
global.setAlignment(decl.getAlignment(target));
+ if (decl.@"linksection") |section| global.setSection(section);
assert(decl.has_tv);
const init_val = if (decl.val.castTag(.variable)) |payload| init_val: {
const variable = payload.data;
@@ -2235,6 +2291,7 @@ pub const DeclGen = struct {
new_global.setLinkage(global.getLinkage());
new_global.setUnnamedAddr(global.getUnnamedAddress());
new_global.setAlignment(global.getAlignment());
+ if (decl.@"linksection") |section| new_global.setSection(section);
new_global.setInitializer(llvm_init);
// replaceAllUsesWith requires the type to be unchanged. So we bitcast
// the new global to the old type and use that as the thing to replace
@@ -2349,6 +2406,14 @@ pub const DeclGen = struct {
dg.addFnAttr(llvm_fn, "noreturn");
}
+ var llvm_arg_i = @as(c_uint, @boolToInt(sret)) + @boolToInt(err_return_tracing);
+ var it = iterateParamTypes(dg, fn_info);
+ while (it.next()) |_| : (llvm_arg_i += 1) {
+ if (!it.byval_attr) continue;
+ const param = llvm_fn.getParam(llvm_arg_i);
+ llvm_fn.addByValAttr(llvm_arg_i, param.typeOf().getElementType());
+ }
+
return llvm_fn;
}
@@ -2688,9 +2753,7 @@ pub const DeclGen = struct {
const struct_obj = t.castTag(.@"struct").?.data;
if (struct_obj.layout == .Packed) {
- var buf: Type.Payload.Bits = undefined;
- const int_ty = struct_obj.packedIntegerType(target, &buf);
- const int_llvm_ty = try dg.lowerType(int_ty);
+ const int_llvm_ty = try dg.lowerType(struct_obj.backing_int_ty);
gop.value_ptr.* = int_llvm_ty;
return int_llvm_ty;
}
@@ -2714,9 +2777,9 @@ pub const DeclGen = struct {
var any_underaligned_fields = false;
for (struct_obj.fields.values()) |field| {
- if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (field.is_comptime or !field.ty.hasRuntimeBits()) continue;
- const field_align = field.normalAlignment(target);
+ const field_align = field.alignment(target, struct_obj.layout);
const field_ty_align = field.ty.abiAlignment(target);
any_underaligned_fields = any_underaligned_fields or
field_align < field_ty_align;
@@ -2895,6 +2958,18 @@ pub const DeclGen = struct {
llvm_params.appendAssumeCapacity(big_int_ty);
}
},
+ .multiple_llvm_float => {
+ const llvm_ints = it.llvm_types_buffer[0..it.llvm_types_len];
+ try llvm_params.ensureUnusedCapacity(it.llvm_types_len);
+ for (llvm_ints) |float_bits| {
+ const float_ty = switch (float_bits) {
+ 64 => dg.context.doubleType(),
+ 80 => dg.context.x86FP80Type(),
+ else => unreachable,
+ };
+ llvm_params.appendAssumeCapacity(float_ty);
+ }
+ },
.as_u16 => {
try llvm_params.append(dg.context.intType(16));
},
@@ -3356,8 +3431,8 @@ pub const DeclGen = struct {
const struct_obj = tv.ty.castTag(.@"struct").?.data;
if (struct_obj.layout == .Packed) {
- const big_bits = struct_obj.packedIntegerBits(target);
- const int_llvm_ty = dg.context.intType(big_bits);
+ const big_bits = struct_obj.backing_int_ty.bitSize(target);
+ const int_llvm_ty = dg.context.intType(@intCast(c_uint, big_bits));
const fields = struct_obj.fields.values();
comptime assert(Type.packed_struct_layout_version == 2);
var running_int: *const llvm.Value = int_llvm_ty.constNull();
@@ -3372,7 +3447,10 @@ pub const DeclGen = struct {
});
const ty_bit_size = @intCast(u16, field.ty.bitSize(target));
const small_int_ty = dg.context.intType(ty_bit_size);
- const small_int_val = non_int_val.constBitCast(small_int_ty);
+ const small_int_val = if (field.ty.isPtrAtRuntime())
+ non_int_val.constPtrToInt(small_int_ty)
+ else
+ non_int_val.constBitCast(small_int_ty);
const shift_rhs = int_llvm_ty.constInt(running_bits, .False);
// If the field is as large as the entire packed struct, this
// zext would go from, e.g. i16 to i16. This is legal with
@@ -3395,9 +3473,9 @@ pub const DeclGen = struct {
var need_unnamed = false;
for (struct_obj.fields.values()) |field, i| {
- if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (field.is_comptime or !field.ty.hasRuntimeBits()) continue;
- const field_align = field.normalAlignment(target);
+ const field_align = field.alignment(target, struct_obj.layout);
big_align = @maximum(big_align, field_align);
const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, field_align);
@@ -3457,7 +3535,7 @@ pub const DeclGen = struct {
});
}
const union_obj = tv.ty.cast(Type.Payload.Union).?.data;
- const field_index = union_obj.tag_ty.enumTagFieldIndex(tag_and_val.tag, dg.module).?;
+ const field_index = tv.ty.unionTagFieldIndex(tag_and_val.tag, dg.module).?;
assert(union_obj.haveFieldTypes());
// Sometimes we must make an unnamed struct because LLVM does
@@ -3976,6 +4054,9 @@ pub const FuncGen = struct {
/// Note that this can disagree with isByRef for the return type in the case
/// of C ABI functions.
ret_ptr: ?*const llvm.Value,
+ /// Any function that needs to perform Valgrind client requests needs an array alloca
+ /// instruction; at most one such alloca is needed per function.
+ valgrind_client_request_array: ?*const llvm.Value = null,
/// These fields are used to refer to the LLVM value of the function parameters
/// in an Arg instruction.
/// This list may be shorter than the list according to the zig type system;
@@ -4215,6 +4296,9 @@ pub const FuncGen = struct {
.union_init => try self.airUnionInit(inst),
.prefetch => try self.airPrefetch(inst),
+ .is_named_enum_value => try self.airIsNamedEnumValue(inst),
+ .error_set_has_value => try self.airErrorSetHasValue(inst),
+
.reduce => try self.airReduce(inst, false),
.reduce_optimized => try self.airReduce(inst, true),
@@ -4423,6 +4507,39 @@ pub const FuncGen = struct {
llvm_args.appendAssumeCapacity(load_inst);
}
},
+ .multiple_llvm_float => {
+ const arg = args[it.zig_index - 1];
+ const param_ty = self.air.typeOf(arg);
+ const llvm_floats = it.llvm_types_buffer[0..it.llvm_types_len];
+ const llvm_arg = try self.resolveInst(arg);
+ const is_by_ref = isByRef(param_ty);
+ const arg_ptr = if (is_by_ref) llvm_arg else p: {
+ const p = self.buildAlloca(llvm_arg.typeOf());
+ const store_inst = self.builder.buildStore(llvm_arg, p);
+ store_inst.setAlignment(param_ty.abiAlignment(target));
+ break :p p;
+ };
+
+ var field_types_buf: [8]*const llvm.Type = undefined;
+ const field_types = field_types_buf[0..llvm_floats.len];
+ for (llvm_floats) |float_bits, i| {
+ switch (float_bits) {
+ 64 => field_types[i] = self.dg.context.doubleType(),
+ 80 => field_types[i] = self.dg.context.x86FP80Type(),
+ else => {},
+ }
+ }
+ const ints_llvm_ty = self.dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False);
+ const casted_ptr = self.builder.buildBitCast(arg_ptr, ints_llvm_ty.pointerType(0), "");
+ try llvm_args.ensureUnusedCapacity(it.llvm_types_len);
+ for (llvm_floats) |_, i_usize| {
+ const i = @intCast(c_uint, i_usize);
+ const field_ptr = self.builder.buildStructGEP(casted_ptr, i, "");
+ const load_inst = self.builder.buildLoad(field_ptr, "");
+ load_inst.setAlignment(target.cpu.arch.ptrBitWidth() / 8);
+ llvm_args.appendAssumeCapacity(load_inst);
+ }
+ },
.as_u16 => {
const arg = args[it.zig_index - 1];
const llvm_arg = try self.resolveInst(arg);
@@ -5295,7 +5412,7 @@ pub const FuncGen = struct {
const same_size_int = self.context.intType(elem_bits);
const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
return self.builder.buildBitCast(truncated_int, elem_llvm_ty, "");
- } else if (field_ty.zigTypeTag() == .Pointer) {
+ } else if (field_ty.isPtrAtRuntime()) {
const elem_bits = @intCast(c_uint, field_ty.bitSize(target));
const same_size_int = self.context.intType(elem_bits);
const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
@@ -6166,7 +6283,9 @@ pub const FuncGen = struct {
}
const llvm_optional_ty = try self.dg.lowerType(optional_ty);
if (isByRef(optional_ty)) {
+ const target = self.dg.module.getTarget();
const optional_ptr = self.buildAlloca(llvm_optional_ty);
+ optional_ptr.setAlignment(optional_ty.abiAlignment(target));
const payload_ptr = self.builder.buildStructGEP(llvm_optional_ty, optional_ptr, 0, "");
var ptr_ty_payload: Type.Payload.ElemType = .{
.base = .{ .tag = .single_mut_pointer },
@@ -6186,20 +6305,21 @@ pub const FuncGen = struct {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const inst_ty = self.air.typeOfIndex(inst);
+ const err_un_ty = self.air.typeOfIndex(inst);
const operand = try self.resolveInst(ty_op.operand);
const payload_ty = self.air.typeOf(ty_op.operand);
if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
return operand;
}
const ok_err_code = (try self.dg.lowerType(Type.anyerror)).constNull();
- const err_un_llvm_ty = try self.dg.lowerType(inst_ty);
+ const err_un_llvm_ty = try self.dg.lowerType(err_un_ty);
const target = self.dg.module.getTarget();
const payload_offset = errUnionPayloadOffset(payload_ty, target);
const error_offset = errUnionErrorOffset(payload_ty, target);
- if (isByRef(inst_ty)) {
+ if (isByRef(err_un_ty)) {
const result_ptr = self.buildAlloca(err_un_llvm_ty);
+ result_ptr.setAlignment(err_un_ty.abiAlignment(target));
const err_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, error_offset, "");
const store_inst = self.builder.buildStore(ok_err_code, err_ptr);
store_inst.setAlignment(Type.anyerror.abiAlignment(target));
@@ -6234,6 +6354,7 @@ pub const FuncGen = struct {
const error_offset = errUnionErrorOffset(payload_ty, target);
if (isByRef(err_un_ty)) {
const result_ptr = self.buildAlloca(err_un_llvm_ty);
+ result_ptr.setAlignment(err_un_ty.abiAlignment(target));
const err_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, error_offset, "");
const store_inst = self.builder.buildStore(operand, err_ptr);
store_inst.setAlignment(Type.anyerror.abiAlignment(target));
@@ -7412,7 +7533,7 @@ pub const FuncGen = struct {
const lbrace_col = func.lbrace_column + 1;
const di_local_var = dib.createParameterVariable(
self.di_scope.?,
- func.getParamName(src_index).ptr, // TODO test 0 bit args
+ func.getParamName(self.dg.module, src_index).ptr, // TODO test 0 bit args
self.di_file.?,
lbrace_line,
try self.dg.object.lowerDebugType(inst_ty, .full),
@@ -7515,8 +7636,7 @@ pub const FuncGen = struct {
const len = usize_llvm_ty.constInt(operand_size, .False);
_ = self.builder.buildMemSet(dest_ptr_u8, fill_char, len, dest_ptr_align, ptr_ty.isVolatilePtr());
if (self.dg.module.comp.bin_file.options.valgrind) {
- // TODO generate valgrind client request to mark byte range as undefined
- // see gen_valgrind_undef() in codegen.cpp
+ self.valgrindMarkUndef(dest_ptr, len);
}
} else {
const src_operand = try self.resolveInst(bin_op.rhs);
@@ -7786,8 +7906,7 @@ pub const FuncGen = struct {
_ = self.builder.buildMemSet(dest_ptr_u8, fill_char, len, dest_ptr_align, ptr_ty.isVolatilePtr());
if (val_is_undef and self.dg.module.comp.bin_file.options.valgrind) {
- // TODO generate valgrind client request to mark byte range as undefined
- // see gen_valgrind_undef() in codegen.cpp
+ self.valgrindMarkUndef(dest_ptr_u8, len);
}
return null;
}
@@ -7994,6 +8113,134 @@ pub const FuncGen = struct {
}
}
+ fn airErrorSetHasValue(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const operand = try self.resolveInst(ty_op.operand);
+ const error_set_ty = self.air.getRefType(ty_op.ty);
+
+ const names = error_set_ty.errorSetNames();
+ const valid_block = self.dg.context.appendBasicBlock(self.llvm_func, "Valid");
+ const invalid_block = self.dg.context.appendBasicBlock(self.llvm_func, "Invalid");
+ const end_block = self.context.appendBasicBlock(self.llvm_func, "End");
+ const switch_instr = self.builder.buildSwitch(operand, invalid_block, @intCast(c_uint, names.len));
+
+ for (names) |name| {
+ const err_int = self.dg.module.global_error_set.get(name).?;
+ const this_tag_int_value = int: {
+ var tag_val_payload: Value.Payload.U64 = .{
+ .base = .{ .tag = .int_u64 },
+ .data = err_int,
+ };
+ break :int try self.dg.lowerValue(.{
+ .ty = Type.err_int,
+ .val = Value.initPayload(&tag_val_payload.base),
+ });
+ };
+ switch_instr.addCase(this_tag_int_value, valid_block);
+ }
+ self.builder.positionBuilderAtEnd(valid_block);
+ _ = self.builder.buildBr(end_block);
+
+ self.builder.positionBuilderAtEnd(invalid_block);
+ _ = self.builder.buildBr(end_block);
+
+ self.builder.positionBuilderAtEnd(end_block);
+
+ const llvm_type = self.dg.context.intType(1);
+ const incoming_values: [2]*const llvm.Value = .{
+ llvm_type.constInt(1, .False), llvm_type.constInt(0, .False),
+ };
+ const incoming_blocks: [2]*const llvm.BasicBlock = .{
+ valid_block, invalid_block,
+ };
+ const phi_node = self.builder.buildPhi(llvm_type, "");
+ phi_node.addIncoming(&incoming_values, &incoming_blocks, 2);
+ return phi_node;
+ }
+
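Semantically, the switch-plus-phi built above answers "is this runtime error integer a member of the error set?"; roughly this comptime Zig sketch (hypothetical helper, for illustration only):

    const std = @import("std");

    fn errorSetHasValue(comptime Es: type, err_int: u16) bool {
        // A null payload would mean `anyerror`, which contains every value.
        const errors = @typeInfo(Es).ErrorSet orelse return true;
        inline for (errors) |e| {
            if (@errorToInt(@field(anyerror, e.name)) == err_int) return true;
        }
        return false;
    }

    test "errorSetHasValue" {
        const Es = error{ Foo, Bar };
        try std.testing.expect(errorSetHasValue(Es, @errorToInt(error.Foo)));
        try std.testing.expect(!errorSetHasValue(Es, @errorToInt(error.Baz)));
    }
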
+ fn airIsNamedEnumValue(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const un_op = self.air.instructions.items(.data)[inst].un_op;
+ const operand = try self.resolveInst(un_op);
+ const enum_ty = self.air.typeOf(un_op);
+
+ const llvm_fn = try self.getIsNamedEnumValueFunction(enum_ty);
+ const params = [_]*const llvm.Value{operand};
+ return self.builder.buildCall(llvm_fn, &params, params.len, .Fast, .Auto, "");
+ }
+
+ fn getIsNamedEnumValueFunction(self: *FuncGen, enum_ty: Type) !*const llvm.Value {
+ const enum_decl = enum_ty.getOwnerDecl();
+
+ // TODO: detect when the type changes and re-emit this function.
+ const gop = try self.dg.object.named_enum_map.getOrPut(self.dg.gpa, enum_decl);
+ if (gop.found_existing) return gop.value_ptr.*;
+ errdefer assert(self.dg.object.named_enum_map.remove(enum_decl));
+
+ var arena_allocator = std.heap.ArenaAllocator.init(self.gpa);
+ defer arena_allocator.deinit();
+ const arena = arena_allocator.allocator();
+
+ const mod = self.dg.module;
+ const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_is_named_enum_value_{s}", .{
+ try mod.declPtr(enum_decl).getFullyQualifiedName(mod),
+ });
+
+ var int_tag_type_buffer: Type.Payload.Bits = undefined;
+ const int_tag_ty = enum_ty.intTagType(&int_tag_type_buffer);
+ const param_types = [_]*const llvm.Type{try self.dg.lowerType(int_tag_ty)};
+
+ const llvm_ret_ty = try self.dg.lowerType(Type.bool);
+ const fn_type = llvm.functionType(llvm_ret_ty, &param_types, param_types.len, .False);
+ const fn_val = self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type);
+ fn_val.setLinkage(.Internal);
+ fn_val.setFunctionCallConv(.Fast);
+ self.dg.addCommonFnAttributes(fn_val);
+ gop.value_ptr.* = fn_val;
+
+ const prev_block = self.builder.getInsertBlock();
+ const prev_debug_location = self.builder.getCurrentDebugLocation2();
+ defer {
+ self.builder.positionBuilderAtEnd(prev_block);
+ if (self.di_scope != null) {
+ self.builder.setCurrentDebugLocation2(prev_debug_location);
+ }
+ }
+
+ const entry_block = self.dg.context.appendBasicBlock(fn_val, "Entry");
+ self.builder.positionBuilderAtEnd(entry_block);
+ self.builder.clearCurrentDebugLocation();
+
+ const fields = enum_ty.enumFields();
+ const named_block = self.dg.context.appendBasicBlock(fn_val, "Named");
+ const unnamed_block = self.dg.context.appendBasicBlock(fn_val, "Unnamed");
+ const tag_int_value = fn_val.getParam(0);
+ const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block, @intCast(c_uint, fields.count()));
+
+ for (fields.keys()) |_, field_index| {
+ const this_tag_int_value = int: {
+ var tag_val_payload: Value.Payload.U32 = .{
+ .base = .{ .tag = .enum_field_index },
+ .data = @intCast(u32, field_index),
+ };
+ break :int try self.dg.lowerValue(.{
+ .ty = enum_ty,
+ .val = Value.initPayload(&tag_val_payload.base),
+ });
+ };
+ switch_instr.addCase(this_tag_int_value, named_block);
+ }
+ self.builder.positionBuilderAtEnd(named_block);
+ _ = self.builder.buildRet(self.dg.context.intType(1).constInt(1, .False));
+
+ self.builder.positionBuilderAtEnd(unnamed_block);
+ _ = self.builder.buildRet(self.dg.context.intType(1).constInt(0, .False));
+ return fn_val;
+ }
+
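The generated `__zig_is_named_enum_value_*` helper is the runtime counterpart of this comptime sketch (illustrative only; the real function switches on the tag integer, one case per declared field):

    const std = @import("std");

    fn isNamedEnumValue(comptime E: type, tag_int: std.meta.Tag(E)) bool {
        inline for (@typeInfo(E).Enum.fields) |field| {
            if (tag_int == field.value) return true;
        }
        return false;
    }

    test "isNamedEnumValue" {
        const E = enum(u8) { a = 1, b = 2, _ }; // non-exhaustive: unnamed values exist
        try std.testing.expect(isNamedEnumValue(E, 2));
        try std.testing.expect(!isNamedEnumValue(E, 7));
    }
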
fn airTagName(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
@@ -8272,8 +8519,8 @@ pub const FuncGen = struct {
.Struct => {
if (result_ty.containerLayout() == .Packed) {
const struct_obj = result_ty.castTag(.@"struct").?.data;
- const big_bits = struct_obj.packedIntegerBits(target);
- const int_llvm_ty = self.dg.context.intType(big_bits);
+ const big_bits = struct_obj.backing_int_ty.bitSize(target);
+ const int_llvm_ty = self.dg.context.intType(@intCast(c_uint, big_bits));
const fields = struct_obj.fields.values();
comptime assert(Type.packed_struct_layout_version == 2);
var running_int: *const llvm.Value = int_llvm_ty.constNull();
@@ -8285,7 +8532,7 @@ pub const FuncGen = struct {
const non_int_val = try self.resolveInst(elem);
const ty_bit_size = @intCast(u16, field.ty.bitSize(target));
const small_int_ty = self.dg.context.intType(ty_bit_size);
- const small_int_val = if (field.ty.zigTypeTag() == .Pointer)
+ const small_int_val = if (field.ty.isPtrAtRuntime())
self.builder.buildPtrToInt(non_int_val, small_int_ty, "")
else
self.builder.buildBitCast(non_int_val, small_int_ty, "");
@@ -8973,6 +9220,89 @@ pub const FuncGen = struct {
info.@"volatile",
);
}
+
+ fn valgrindMarkUndef(fg: *FuncGen, ptr: *const llvm.Value, len: *const llvm.Value) void {
+ const VG_USERREQ__MAKE_MEM_UNDEFINED = 1296236545;
+ const target = fg.dg.module.getTarget();
+ const usize_llvm_ty = fg.context.intType(target.cpu.arch.ptrBitWidth());
+ const zero = usize_llvm_ty.constInt(0, .False);
+ const req = usize_llvm_ty.constInt(VG_USERREQ__MAKE_MEM_UNDEFINED, .False);
+ const ptr_as_usize = fg.builder.buildPtrToInt(ptr, usize_llvm_ty, "");
+ _ = valgrindClientRequest(fg, zero, req, ptr_as_usize, len, zero, zero, zero);
+ }
+
+ fn valgrindClientRequest(
+ fg: *FuncGen,
+ default_value: *const llvm.Value,
+ request: *const llvm.Value,
+ a1: *const llvm.Value,
+ a2: *const llvm.Value,
+ a3: *const llvm.Value,
+ a4: *const llvm.Value,
+ a5: *const llvm.Value,
+ ) *const llvm.Value {
+ const target = fg.dg.module.getTarget();
+ if (!target_util.hasValgrindSupport(target)) return default_value;
+
+ const usize_llvm_ty = fg.context.intType(target.cpu.arch.ptrBitWidth());
+ const usize_alignment = @intCast(c_uint, Type.usize.abiSize(target));
+
+ switch (target.cpu.arch) {
+ .x86_64 => {
+ const array_ptr = fg.valgrind_client_request_array orelse a: {
+ const array_ptr = fg.buildAlloca(usize_llvm_ty.arrayType(6));
+ array_ptr.setAlignment(usize_alignment);
+ fg.valgrind_client_request_array = array_ptr;
+ break :a array_ptr;
+ };
+ const array_elements = [_]*const llvm.Value{ request, a1, a2, a3, a4, a5 };
+ const zero = usize_llvm_ty.constInt(0, .False);
+ for (array_elements) |elem, i| {
+ const indexes = [_]*const llvm.Value{
+ zero, usize_llvm_ty.constInt(@intCast(c_uint, i), .False),
+ };
+ const elem_ptr = fg.builder.buildInBoundsGEP(array_ptr, &indexes, indexes.len, "");
+ const store_inst = fg.builder.buildStore(elem, elem_ptr);
+ store_inst.setAlignment(usize_alignment);
+ }
+
+ const asm_template =
+ \\rolq $$3, %rdi ; rolq $$13, %rdi
+ \\rolq $$61, %rdi ; rolq $$51, %rdi
+ \\xchgq %rbx,%rbx
+ ;
+
+ const asm_constraints = "={rdx},{rax},0,~{cc},~{memory}";
+
+ const array_ptr_as_usize = fg.builder.buildPtrToInt(array_ptr, usize_llvm_ty, "");
+ const args = [_]*const llvm.Value{ array_ptr_as_usize, default_value };
+ const param_types = [_]*const llvm.Type{ usize_llvm_ty, usize_llvm_ty };
+ const fn_llvm_ty = llvm.functionType(usize_llvm_ty, &param_types, args.len, .False);
+ const asm_fn = llvm.getInlineAsm(
+ fn_llvm_ty,
+ asm_template,
+ asm_template.len,
+ asm_constraints,
+ asm_constraints.len,
+ .True, // has side effects
+ .False, // alignstack
+ .ATT,
+ .False,
+ );
+
+ const call = fg.builder.buildCall(
+ asm_fn,
+ &args,
+ args.len,
+ .C,
+ .Auto,
+ "",
+ );
+ return call;
+ },
+ else => unreachable,
+ }
+ }
};
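
The inline asm assembled in `valgrindClientRequest` follows Valgrind's client-request protocol, the same one `std.valgrind.doClientRequest` implements in userland Zig; roughly (x86_64-only sketch):

    const builtin = @import("builtin");

    fn doClientRequest(default: usize, request: usize, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) usize {
        if (builtin.cpu.arch != .x86_64) return default;
        // Valgrind reads the six words through the pointer passed in %rax;
        // %rdx carries the default value in and the result out. The rotate
        // sequence is a no-op that Valgrind's JIT recognizes as the marker.
        const args = &[_]usize{ request, a1, a2, a3, a4, a5 };
        return asm volatile (
            \\ rolq $$3,  %%rdi ; rolq $$13, %%rdi
            \\ rolq $$61, %%rdi ; rolq $$51, %%rdi
            \\ xchgq %%rbx,%%rbx
            : [ret] "={rdx}" (-> usize),
            : [args] "{rax}" (args),
              [default] "0" (default),
            : "cc", "memory"
        );
    }
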
fn initializeLLVMTarget(arch: std.Target.Cpu.Arch) void {
@@ -9266,13 +9596,14 @@ fn llvmFieldIndex(
}
return null;
}
- assert(ty.containerLayout() != .Packed);
+ const layout = ty.containerLayout();
+ assert(layout != .Packed);
var llvm_field_index: c_uint = 0;
for (ty.structFields().values()) |field, i| {
- if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (field.is_comptime or !field.ty.hasRuntimeBits()) continue;
- const field_align = field.normalAlignment(target);
+ const field_align = field.alignment(target, layout);
big_align = @maximum(big_align, field_align);
const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, field_align);
@@ -9392,16 +9723,20 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*const llvm.
llvm_types_index += 1;
},
.sse => {
- @panic("TODO");
+ llvm_types_buffer[llvm_types_index] = dg.context.doubleType();
+ llvm_types_index += 1;
},
.sseup => {
- @panic("TODO");
+ llvm_types_buffer[llvm_types_index] = dg.context.doubleType();
+ llvm_types_index += 1;
},
.x87 => {
- @panic("TODO");
+ llvm_types_buffer[llvm_types_index] = dg.context.x86FP80Type();
+ llvm_types_index += 1;
},
.x87up => {
- @panic("TODO");
+ llvm_types_buffer[llvm_types_index] = dg.context.x86FP80Type();
+ llvm_types_index += 1;
},
.complex_x87 => {
@panic("TODO");
@@ -9447,6 +9782,7 @@ const ParamTypeIterator = struct {
target: std.Target,
llvm_types_len: u32,
llvm_types_buffer: [8]u16,
+ byval_attr: bool,
const Lowering = enum {
no_bits,
@@ -9454,6 +9790,7 @@ const ParamTypeIterator = struct {
byref,
abi_sized_int,
multiple_llvm_ints,
+ multiple_llvm_float,
slice,
as_u16,
};
@@ -9461,6 +9798,7 @@ const ParamTypeIterator = struct {
pub fn next(it: *ParamTypeIterator) ?Lowering {
if (it.zig_index >= it.fn_info.param_types.len) return null;
const ty = it.fn_info.param_types[it.zig_index];
+ it.byval_attr = false;
return nextInner(it, ty);
}
@@ -9546,6 +9884,7 @@ const ParamTypeIterator = struct {
.memory => {
it.zig_index += 1;
it.llvm_index += 1;
+ it.byval_attr = true;
return .byref;
},
.sse => {
@@ -9565,6 +9904,7 @@ const ParamTypeIterator = struct {
if (classes[0] == .memory) {
it.zig_index += 1;
it.llvm_index += 1;
+ it.byval_attr = true;
return .byref;
}
var llvm_types_buffer: [8]u16 = undefined;
@@ -9576,16 +9916,20 @@ const ParamTypeIterator = struct {
llvm_types_index += 1;
},
.sse => {
- @panic("TODO");
+ llvm_types_buffer[llvm_types_index] = 64;
+ llvm_types_index += 1;
},
.sseup => {
- @panic("TODO");
+ llvm_types_buffer[llvm_types_index] = 64;
+ llvm_types_index += 1;
},
.x87 => {
- @panic("TODO");
+ llvm_types_buffer[llvm_types_index] = 80;
+ llvm_types_index += 1;
},
.x87up => {
- @panic("TODO");
+ llvm_types_buffer[llvm_types_index] = 80;
+ llvm_types_index += 1;
},
.complex_x87 => {
@panic("TODO");
@@ -9599,11 +9943,16 @@ const ParamTypeIterator = struct {
it.llvm_index += 1;
return .abi_sized_int;
}
+ if (classes[0] == .sse and classes[1] == .none) {
+ it.zig_index += 1;
+ it.llvm_index += 1;
+ return .byval;
+ }
it.llvm_types_buffer = llvm_types_buffer;
it.llvm_types_len = llvm_types_index;
it.llvm_index += llvm_types_index;
it.zig_index += 1;
- return .multiple_llvm_ints;
+ return if (classes[0] == .integer) .multiple_llvm_ints else .multiple_llvm_float;
},
},
.wasm32 => {
@@ -9644,6 +9993,7 @@ fn iterateParamTypes(dg: *DeclGen, fn_info: Type.Payload.Function.Data) ParamTyp
.target = dg.module.getTarget(),
.llvm_types_buffer = undefined,
.llvm_types_len = 0,
+ .byval_attr = false,
};
}
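
With the SSE/x87 classes filled in above, an x86_64 C-ABI struct of floats no longer hits `@panic("TODO")`; for example, this hypothetical export is now lowered as `multiple_llvm_float` with `llvm_types_buffer = .{ 64, 64 }`, i.e. two separate `double` parameters at the LLVM level:

    // Classified as { sse, sse } under the SysV x86_64 ABI.
    const Point = extern struct { x: f64, y: f64 };

    export fn dot(a: Point, b: Point) f64 {
        return a.x * b.x + a.y * b.y;
    }
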
diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig
index 4090054800..d1a386ac60 100644
--- a/src/codegen/llvm/bindings.zig
+++ b/src/codegen/llvm/bindings.zig
@@ -129,6 +129,9 @@ pub const Value = opaque {
pub const setThreadLocalMode = LLVMSetThreadLocalMode;
extern fn LLVMSetThreadLocalMode(Global: *const Value, Mode: ThreadLocalMode) void;
+ pub const setSection = LLVMSetSection;
+ extern fn LLVMSetSection(Global: *const Value, Section: [*:0]const u8) void;
+
pub const deleteGlobal = LLVMDeleteGlobal;
extern fn LLVMDeleteGlobal(GlobalVar: *const Value) void;
@@ -216,6 +219,9 @@ pub const Value = opaque {
pub const setInitializer = LLVMSetInitializer;
extern fn LLVMSetInitializer(GlobalVar: *const Value, ConstantVal: *const Value) void;
+ pub const setDLLStorageClass = LLVMSetDLLStorageClass;
+ extern fn LLVMSetDLLStorageClass(Global: *const Value, Class: DLLStorageClass) void;
+
pub const addCase = LLVMAddCase;
extern fn LLVMAddCase(Switch: *const Value, OnVal: *const Value, Dest: *const BasicBlock) void;
@@ -244,6 +250,9 @@ pub const Value = opaque {
pub const getGEPResultElementType = ZigLLVMGetGEPResultElementType;
extern fn ZigLLVMGetGEPResultElementType(GEP: *const Value) *const Type;
+
+ pub const addByValAttr = ZigLLVMAddByValAttr;
+ extern fn ZigLLVMAddByValAttr(Fn: *const Value, ArgNo: c_uint, type: *const Type) void;
};
pub const Type = opaque {
@@ -1486,6 +1495,12 @@ pub const CallAttr = enum(c_int) {
AlwaysInline,
};
+pub const DLLStorageClass = enum(c_uint) {
+ Default,
+ DLLImport,
+ DLLExport,
+};
+
pub const address_space = struct {
pub const default: c_uint = 0;
diff --git a/src/config.zig.in b/src/config.zig.in
index a886b2d28e..12e13815f8 100644
--- a/src/config.zig.in
+++ b/src/config.zig.in
@@ -8,6 +8,5 @@ pub const enable_logging: bool = @ZIG_ENABLE_LOGGING_BOOL@;
pub const enable_link_snapshots: bool = false;
pub const enable_tracy = false;
pub const value_tracing = false;
-pub const is_stage1 = true;
+pub const have_stage1 = true;
pub const skip_non_native = false;
-pub const omit_stage2: bool = @ZIG_OMIT_STAGE2_BOOL@;
diff --git a/src/glibc.zig b/src/glibc.zig
index 4deac5275f..4e33867169 100644
--- a/src/glibc.zig
+++ b/src/glibc.zig
@@ -41,29 +41,7 @@ pub const libs = [_]Lib{
.{ .name = "rt", .sover = 1 },
.{ .name = "ld", .sover = 2 },
.{ .name = "util", .sover = 1 },
-};
-
-// glibc's naming of Zig architectures
-const Arch = enum(c_int) {
- arm,
- armeb,
- aarch64,
- aarch64_be,
- mips,
- mipsel,
- mips64,
- mips64el,
- powerpc,
- powerpc64,
- powerpc64le,
- riscv32,
- riscv64,
- sparc,
- sparcv9,
- sparcel,
- s390x,
- i386,
- x86_64,
+ .{ .name = "resolv", .sover = 2 },
};
pub const LoadMetaDataError = error{
@@ -157,7 +135,7 @@ pub fn loadMetaData(gpa: Allocator, zig_lib_dir: fs.Dir) LoadMetaDataError!*ABI
log.err("abilists: expected ABI name", .{});
return error.ZigInstallationCorrupt;
};
- const arch_tag = std.meta.stringToEnum(Arch, arch_name) orelse {
+ const arch_tag = std.meta.stringToEnum(std.Target.Cpu.Arch, arch_name) orelse {
log.err("abilists: unrecognized arch: '{s}'", .{arch_name});
return error.ZigInstallationCorrupt;
};
@@ -171,7 +149,7 @@ pub fn loadMetaData(gpa: Allocator, zig_lib_dir: fs.Dir) LoadMetaDataError!*ABI
};
targets[i] = .{
- .arch = glibcToZigArch(arch_tag),
+ .arch = arch_tag,
.os = .linux,
.abi = abi_tag,
};
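
Dropping the glibc-specific `Arch` enum means the abilists arch names are parsed straight into `std.Target.Cpu.Arch`; this assumes the abilists data now uses Zig's spellings (e.g. `sparc64` rather than glibc's `sparcv9`). A sketch:

    const std = @import("std");

    test "abilists arch names parse as Zig arch tags" {
        const parsed = std.meta.stringToEnum(std.Target.Cpu.Arch, "x86_64");
        try std.testing.expectEqual(std.Target.Cpu.Arch.x86_64, parsed.?);
        // Unknown names yield null and are reported as a corrupt installation.
        try std.testing.expect(std.meta.stringToEnum(std.Target.Cpu.Arch, "sparcv9") == null);
    }
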
@@ -1111,6 +1089,7 @@ fn buildSharedLib(
.optimize_mode = comp.compilerRtOptMode(),
.want_sanitize_c = false,
.want_stack_check = false,
+ .want_stack_protector = 0,
.want_red_zone = comp.bin_file.options.red_zone,
.omit_frame_pointer = comp.bin_file.options.omit_frame_pointer,
.want_valgrind = false,
@@ -1138,30 +1117,6 @@ fn buildSharedLib(
try sub_compilation.updateSubCompilation();
}
-fn glibcToZigArch(arch_tag: Arch) std.Target.Cpu.Arch {
- return switch (arch_tag) {
- .arm => .arm,
- .armeb => .armeb,
- .aarch64 => .aarch64,
- .aarch64_be => .aarch64_be,
- .mips => .mips,
- .mipsel => .mipsel,
- .mips64 => .mips64,
- .mips64el => .mips64el,
- .powerpc => .powerpc,
- .powerpc64 => .powerpc64,
- .powerpc64le => .powerpc64le,
- .riscv32 => .riscv32,
- .riscv64 => .riscv64,
- .sparc => .sparc,
- .sparcv9 => .sparc64, // In glibc, sparc64 is called sparcv9.
- .sparcel => .sparcel,
- .s390x => .s390x,
- .i386 => .i386,
- .x86_64 => .x86_64,
- };
-}
-
// Return true if glibc has crti/crtn sources for that architecture.
pub fn needsCrtiCrtn(target: std.Target) bool {
return switch (target.cpu.arch) {
diff --git a/src/libcxx.zig b/src/libcxx.zig
index 301652fe6b..2f10a798f2 100644
--- a/src/libcxx.zig
+++ b/src/libcxx.zig
@@ -208,6 +208,7 @@ pub fn buildLibCXX(comp: *Compilation) !void {
.link_mode = link_mode,
.want_sanitize_c = false,
.want_stack_check = false,
+ .want_stack_protector = 0,
.want_red_zone = comp.bin_file.options.red_zone,
.omit_frame_pointer = comp.bin_file.options.omit_frame_pointer,
.want_valgrind = false,
@@ -351,6 +352,7 @@ pub fn buildLibCXXABI(comp: *Compilation) !void {
.link_mode = link_mode,
.want_sanitize_c = false,
.want_stack_check = false,
+ .want_stack_protector = 0,
.want_red_zone = comp.bin_file.options.red_zone,
.omit_frame_pointer = comp.bin_file.options.omit_frame_pointer,
.want_valgrind = false,
diff --git a/src/libtsan.zig b/src/libtsan.zig
index cbbcd11a8a..16e40c16f8 100644
--- a/src/libtsan.zig
+++ b/src/libtsan.zig
@@ -211,6 +211,7 @@ pub fn buildTsan(comp: *Compilation) !void {
.link_mode = link_mode,
.want_sanitize_c = false,
.want_stack_check = false,
+ .want_stack_protector = 0,
.want_valgrind = false,
.want_tsan = false,
.want_pic = true,
diff --git a/src/libunwind.zig b/src/libunwind.zig
index ea09959c1c..cc4029c6bf 100644
--- a/src/libunwind.zig
+++ b/src/libunwind.zig
@@ -102,6 +102,7 @@ pub fn buildStaticLib(comp: *Compilation) !void {
.link_mode = link_mode,
.want_sanitize_c = false,
.want_stack_check = false,
+ .want_stack_protector = 0,
.want_red_zone = comp.bin_file.options.red_zone,
.omit_frame_pointer = comp.bin_file.options.omit_frame_pointer,
.want_valgrind = false,
diff --git a/src/link.zig b/src/link.zig
index ec8e95a626..7f5f1ebc4b 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -72,7 +72,6 @@ pub const Options = struct {
target: std.Target,
output_mode: std.builtin.OutputMode,
link_mode: std.builtin.LinkMode,
- object_format: std.Target.ObjectFormat,
optimize_mode: std.builtin.Mode,
machine_code_model: std.builtin.CodeModel,
root_name: [:0]const u8,
@@ -91,6 +90,9 @@ pub const Options = struct {
entry: ?[]const u8,
stack_size_override: ?u64,
image_base_override: ?u64,
+ /// 0 means no stack protector;
+ /// any other value enables the stack protector with that buffer size.
+ stack_protector: u32,
cache_mode: CacheMode,
include_compiler_rt: bool,
/// Set to `true` to omit debug info.
@@ -173,6 +175,12 @@ pub const Options = struct {
lib_dirs: []const []const u8,
rpath_list: []const []const u8,
+ /// List of symbols forced as undefined in the symbol table,
+ /// thus forcing their resolution by the linker.
+ /// Corresponds to `-u <symbol>` for ELF and `/include:<symbol>` for COFF/PE.
+ /// TODO add handling for MachO.
+ force_undefined_symbols: std.StringArrayHashMapUnmanaged(void),
+
version: ?std.builtin.Version,
compatibility_version: ?std.builtin.Version,
libc_installation: ?*const LibCInstallation,
@@ -273,13 +281,13 @@ pub const File = struct {
/// rewriting it. A malicious file is detected as incremental link failure
/// and does not cause Illegal Behavior. This operation is not atomic.
pub fn openPath(allocator: Allocator, options: Options) !*File {
- if (options.object_format == .macho) {
+ if (options.target.ofmt == .macho) {
return &(try MachO.openPath(allocator, options)).base;
}
- const use_stage1 = build_options.is_stage1 and options.use_stage1;
+ const use_stage1 = build_options.have_stage1 and options.use_stage1;
if (use_stage1 or options.emit == null) {
- return switch (options.object_format) {
+ return switch (options.target.ofmt) {
.coff => &(try Coff.createEmpty(allocator, options)).base,
.elf => &(try Elf.createEmpty(allocator, options)).base,
.macho => unreachable,
@@ -299,7 +307,7 @@ pub const File = struct {
if (options.module == null) {
// No point in opening a file, we would not write anything to it.
// Initialize with empty.
- return switch (options.object_format) {
+ return switch (options.target.ofmt) {
.coff => &(try Coff.createEmpty(allocator, options)).base,
.elf => &(try Elf.createEmpty(allocator, options)).base,
.macho => unreachable,
@@ -316,12 +324,12 @@ pub const File = struct {
// Open a temporary object file, not the final output file because we
// want to link with LLD.
break :blk try std.fmt.allocPrint(allocator, "{s}{s}", .{
- emit.sub_path, options.object_format.fileExt(options.target.cpu.arch),
+ emit.sub_path, options.target.ofmt.fileExt(options.target.cpu.arch),
});
} else emit.sub_path;
errdefer if (use_lld) allocator.free(sub_path);
- const file: *File = switch (options.object_format) {
+ const file: *File = switch (options.target.ofmt) {
.coff => &(try Coff.openPath(allocator, sub_path, options)).base,
.elf => &(try Elf.openPath(allocator, sub_path, options)).base,
.macho => unreachable,
@@ -421,7 +429,7 @@ pub const File = struct {
NoSpaceLeft,
Unseekable,
PermissionDenied,
- FileBusy,
+ SwapFile,
SystemResources,
OperationAborted,
BrokenPipe,
@@ -438,6 +446,7 @@ pub const File = struct {
EmitFail,
NameTooLong,
CurrentWorkingDirectoryUnlinked,
+ LockViolation,
};
/// Called from within the CodeGen to lower a local variable instantion as an unnamed
@@ -774,12 +783,15 @@ pub const File = struct {
error.FileNotFound => {},
else => |e| return e,
}
- try std.fs.rename(
+ std.fs.rename(
cache_directory.handle,
tmp_dir_sub_path,
cache_directory.handle,
o_sub_path,
- );
+ ) catch |err| switch (err) {
+ error.AccessDenied => unreachable, // We are most likely trying to move a dir with open handles to its resources
+ else => |e| return e,
+ };
break;
} else {
std.fs.rename(
@@ -814,7 +826,7 @@ pub const File = struct {
// If there is no Zig code to compile, then we should skip flushing the output file
// because it will not be part of the linker line anyway.
const module_obj_path: ?[]const u8 = if (base.options.module) |module| blk: {
- const use_stage1 = build_options.is_stage1 and base.options.use_stage1;
+ const use_stage1 = build_options.have_stage1 and base.options.use_stage1;
if (use_stage1) {
const obj_basename = try std.zig.binNameAlloc(arena, .{
.root_name = base.options.root_name,
diff --git a/src/link/C.zig b/src/link/C.zig
index 6449be9c56..955044f90d 100644
--- a/src/link/C.zig
+++ b/src/link/C.zig
@@ -48,7 +48,7 @@ const DeclBlock = struct {
};
pub fn openPath(gpa: Allocator, sub_path: []const u8, options: link.Options) !*C {
- assert(options.object_format == .c);
+ assert(options.target.ofmt == .c);
if (options.use_llvm) return error.LLVMHasNoCBackend;
if (options.use_lld) return error.LLDHasNoCBackend;
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index 1b5ddbbf8b..c30544d3b7 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -128,7 +128,7 @@ pub const TextBlock = struct {
pub const SrcFn = void;
pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*Coff {
- assert(options.object_format == .coff);
+ assert(options.target.ofmt == .coff);
if (build_options.have_llvm and options.use_llvm) {
return createEmpty(allocator, options);
@@ -204,15 +204,18 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
index += 2;
// Characteristics
- var characteristics: u16 = std.coff.IMAGE_FILE_DEBUG_STRIPPED | std.coff.IMAGE_FILE_RELOCS_STRIPPED; // TODO Remove debug info stripped flag when necessary
+ var characteristics: std.coff.CoffHeaderFlags = .{
+ .DEBUG_STRIPPED = 1, // TODO remove debug info stripped flag when necessary
+ .RELOCS_STRIPPED = 1,
+ };
if (options.output_mode == .Exe) {
- characteristics |= std.coff.IMAGE_FILE_EXECUTABLE_IMAGE;
+ characteristics.EXECUTABLE_IMAGE = 1;
}
switch (self.ptr_width) {
- .p32 => characteristics |= std.coff.IMAGE_FILE_32BIT_MACHINE,
- .p64 => characteristics |= std.coff.IMAGE_FILE_LARGE_ADDRESS_AWARE,
+ .p32 => characteristics.@"32BIT_MACHINE" = 1,
+ .p64 => characteristics.LARGE_ADDRESS_AWARE = 1,
}
- mem.writeIntLittle(u16, hdr_data[index..][0..2], characteristics);
+ mem.writeIntLittle(u16, hdr_data[index..][0..2], @bitCast(u16, characteristics));
index += 2;
assert(index == 20);
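
`std.coff.CoffHeaderFlags` replaces the old `IMAGE_FILE_*` integer masks, and `@bitCast` recovers the on-disk `u16`; a sketch (assuming the packed struct follows the PE spec's bit order, as the old constants did):

    const std = @import("std");

    test "characteristics pack to PE bit values" {
        const characteristics: std.coff.CoffHeaderFlags = .{
            .RELOCS_STRIPPED = 1, // bit 0, IMAGE_FILE_RELOCS_STRIPPED (0x0001)
            .EXECUTABLE_IMAGE = 1, // bit 1, IMAGE_FILE_EXECUTABLE_IMAGE (0x0002)
        };
        try std.testing.expectEqual(@as(u16, 0x0003), @bitCast(u16, characteristics));
    }
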
@@ -352,7 +355,10 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
mem.set(u8, hdr_data[index..][0..12], 0);
index += 12;
// Section flags
- mem.writeIntLittle(u32, hdr_data[index..][0..4], std.coff.IMAGE_SCN_CNT_INITIALIZED_DATA | std.coff.IMAGE_SCN_MEM_READ);
+ mem.writeIntLittle(u32, hdr_data[index..][0..4], @bitCast(u32, std.coff.SectionHeaderFlags{
+ .CNT_INITIALIZED_DATA = 1,
+ .MEM_READ = 1,
+ }));
index += 4;
// Then, the .text section
hdr_data[index..][0..8].* = ".text\x00\x00\x00".*;
@@ -378,11 +384,12 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
mem.set(u8, hdr_data[index..][0..12], 0);
index += 12;
// Section flags
- mem.writeIntLittle(
- u32,
- hdr_data[index..][0..4],
- std.coff.IMAGE_SCN_CNT_CODE | std.coff.IMAGE_SCN_MEM_EXECUTE | std.coff.IMAGE_SCN_MEM_READ | std.coff.IMAGE_SCN_MEM_WRITE,
- );
+ mem.writeIntLittle(u32, hdr_data[index..][0..4], @bitCast(u32, std.coff.SectionHeaderFlags{
+ .CNT_CODE = 1,
+ .MEM_EXECUTE = 1,
+ .MEM_READ = 1,
+ .MEM_WRITE = 1,
+ }));
index += 4;
assert(index == optional_header_size + section_table_size);
@@ -411,7 +418,7 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*Coff {
};
const use_llvm = build_options.have_llvm and options.use_llvm;
- const use_stage1 = build_options.is_stage1 and options.use_stage1;
+ const use_stage1 = build_options.have_stage1 and options.use_stage1;
if (use_llvm and !use_stage1) {
self.llvm_object = try LlvmObject.create(gpa, options);
}
@@ -949,7 +956,7 @@ fn linkWithLLD(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Node) !
// If there is no Zig code to compile, then we should skip flushing the output file because it
// will not be part of the linker line anyway.
const module_obj_path: ?[]const u8 = if (self.base.options.module) |module| blk: {
- const use_stage1 = build_options.is_stage1 and self.base.options.use_stage1;
+ const use_stage1 = build_options.have_stage1 and self.base.options.use_stage1;
if (use_stage1) {
const obj_basename = try std.zig.binNameAlloc(arena, .{
.root_name = self.base.options.root_name,
@@ -1126,6 +1133,10 @@ fn linkWithLLD(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Node) !
}
}
+ for (self.base.options.force_undefined_symbols.keys()) |symbol| {
+ try argv.append(try allocPrint(arena, "-INCLUDE:{s}", .{symbol}));
+ }
+
if (is_dyn_lib) {
try argv.append("-DLL");
}
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index 627f946e36..3ae151491f 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -102,7 +102,7 @@ pub const DeclState = struct {
}
pub fn addExprlocReloc(self: *DeclState, target: u32, offset: u32, is_ptr: bool) !void {
- log.debug("{x}: target sym @{d}, via GOT {}", .{ offset, target, is_ptr });
+ log.debug("{x}: target sym %{d}, via GOT {}", .{ offset, target, is_ptr });
try self.exprloc_relocs.append(self.gpa, .{
.@"type" = if (is_ptr) .got_load else .direct_load,
.target = target,
@@ -135,7 +135,7 @@ pub const DeclState = struct {
.@"type" = ty,
.offset = undefined,
});
- log.debug("@{d}: {}", .{ sym_index, ty.fmtDebug() });
+ log.debug("%{d}: {}", .{ sym_index, ty.fmtDebug() });
try self.abbrev_resolver.putNoClobberContext(self.gpa, ty, sym_index, .{
.mod = self.mod,
});
@@ -143,7 +143,7 @@ pub const DeclState = struct {
.mod = self.mod,
}).?;
};
- log.debug("{x}: @{d} + 0", .{ offset, resolv });
+ log.debug("{x}: %{d} + 0", .{ offset, resolv });
try self.abbrev_relocs.append(self.gpa, .{
.target = resolv,
.atom = atom,
@@ -243,11 +243,13 @@ pub const DeclState = struct {
.Pointer => {
if (ty.isSlice()) {
// Slices are structs: struct { .ptr = *, .len = N }
+ const ptr_bits = target.cpu.arch.ptrBitWidth();
+ const ptr_bytes = @intCast(u8, @divExact(ptr_bits, 8));
// DW.AT.structure_type
try dbg_info_buffer.ensureUnusedCapacity(2);
dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_type));
// DW.AT.byte_size, DW.FORM.sdata
- dbg_info_buffer.appendAssumeCapacity(@sizeOf(usize) * 2);
+ dbg_info_buffer.appendAssumeCapacity(ptr_bytes * 2);
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
// DW.AT.member
@@ -276,7 +278,7 @@ pub const DeclState = struct {
try self.addTypeRelocGlobal(atom, Type.usize, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
try dbg_info_buffer.ensureUnusedCapacity(2);
- dbg_info_buffer.appendAssumeCapacity(@sizeOf(usize));
+ dbg_info_buffer.appendAssumeCapacity(ptr_bytes);
// DW.AT.structure_type delimit children
dbg_info_buffer.appendAssumeCapacity(0);
} else {
@@ -1054,6 +1056,7 @@ pub fn commitDeclState(
break :blk false;
};
if (deferred) {
+ log.debug("resolving %{d} deferred until flush", .{target});
try self.global_abbrev_relocs.append(gpa, .{
.target = null,
.offset = reloc.offset,
@@ -1061,10 +1064,12 @@ pub fn commitDeclState(
.addend = reloc.addend,
});
} else {
+ const value = symbol.atom.off + symbol.offset + reloc.addend;
+ log.debug("{x}: [() => {x}] (%{d}, '{}')", .{ reloc.offset, value, target, ty.fmtDebug() });
mem.writeInt(
u32,
dbg_info_buffer.items[reloc.offset..][0..@sizeOf(u32)],
- symbol.atom.off + symbol.offset + reloc.addend,
+ value,
target_endian,
);
}
@@ -1257,7 +1262,7 @@ fn writeDeclDebugInfo(self: *Dwarf, file: *File, atom: *Atom, dbg_info_buf: []co
debug_info_sect.addr = dwarf_segment.vmaddr + new_offset - dwarf_segment.fileoff;
}
debug_info_sect.size = needed_size;
- d_sym.debug_line_header_dirty = true;
+ d_sym.debug_info_header_dirty = true;
}
const file_pos = debug_info_sect.offset + atom.off;
try pwriteDbgInfoNops(
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 254a8a8e6b..dbd98a84fd 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -249,7 +249,7 @@ pub const Export = struct {
};
pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*Elf {
- assert(options.object_format == .elf);
+ assert(options.target.ofmt == .elf);
if (build_options.have_llvm and options.use_llvm) {
return createEmpty(allocator, options);
@@ -328,7 +328,7 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*Elf {
.page_size = page_size,
};
const use_llvm = build_options.have_llvm and options.use_llvm;
- const use_stage1 = build_options.is_stage1 and options.use_stage1;
+ const use_stage1 = build_options.have_stage1 and options.use_stage1;
if (use_llvm and !use_stage1) {
self.llvm_object = try LlvmObject.create(gpa, options);
}
@@ -1448,6 +1448,11 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
try argv.append(entry);
}
+ for (self.base.options.force_undefined_symbols.keys()) |symbol| {
+ try argv.append("-u");
+ try argv.append(symbol);
+ }
+
switch (self.base.options.hash_style) {
.gnu => try argv.append("--hash-style=gnu"),
.sysv => try argv.append("--hash-style=sysv"),
@@ -1673,6 +1678,12 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
}
}
+ // stack-protector.
+ // Related: https://github.com/ziglang/zig/issues/7265
+ if (comp.libssp_static_lib) |ssp| {
+ try argv.append(ssp.full_object_path);
+ }
+
// compiler-rt
if (compiler_rt_path) |p| {
try argv.append(p);
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index db207af5f5..764e4e71b2 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -270,42 +270,42 @@ pub const Export = struct {
};
pub fn openPath(allocator: Allocator, options: link.Options) !*MachO {
- assert(options.object_format == .macho);
+ assert(options.target.ofmt == .macho);
- const use_stage1 = build_options.is_stage1 and options.use_stage1;
- if (use_stage1 or options.emit == null) {
+ const use_stage1 = build_options.have_stage1 and options.use_stage1;
+ if (use_stage1 or options.emit == null or options.module == null) {
return createEmpty(allocator, options);
}
- const emit = options.emit.?;
- const file = try emit.directory.handle.createFile(emit.sub_path, .{
- .truncate = false,
- .read = true,
- .mode = link.determineMode(options),
- });
- errdefer file.close();
+ const emit = options.emit.?;
const self = try createEmpty(allocator, options);
errdefer {
self.base.file = null;
self.base.destroy();
}
- self.base.file = file;
-
if (build_options.have_llvm and options.use_llvm and options.module != null) {
// TODO this intermediary_basename isn't enough; in the case of `zig build-exe`,
// we also want to put the intermediary object file in the cache while the
// main emit directory is the cwd.
self.base.intermediary_basename = try std.fmt.allocPrint(allocator, "{s}{s}", .{
- emit.sub_path, options.object_format.fileExt(options.target.cpu.arch),
+ emit.sub_path, options.target.ofmt.fileExt(options.target.cpu.arch),
});
}
- if (options.output_mode == .Lib and
- options.link_mode == .Static and self.base.intermediary_basename != null)
- {
- return self;
- }
+ if (self.base.intermediary_basename != null) switch (options.output_mode) {
+ .Obj => return self,
+ .Lib => if (options.link_mode == .Static) return self,
+ else => {},
+ };
+
+ const file = try emit.directory.handle.createFile(emit.sub_path, .{
+ .truncate = false,
+ .read = true,
+ .mode = link.determineMode(options),
+ });
+ errdefer file.close();
+ self.base.file = file;
if (!options.strip and options.module != null) blk: {
// TODO once I add support for converting (and relocating) DWARF info from relocatable
@@ -363,7 +363,7 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*MachO {
const cpu_arch = options.target.cpu.arch;
const page_size: u16 = if (cpu_arch == .aarch64) 0x4000 else 0x1000;
const use_llvm = build_options.have_llvm and options.use_llvm;
- const use_stage1 = build_options.is_stage1 and options.use_stage1;
+ const use_stage1 = build_options.have_stage1 and options.use_stage1;
const self = try gpa.create(MachO);
errdefer gpa.destroy(self);
@@ -5315,10 +5315,10 @@ fn writeFunctionStarts(self: *MachO, ncmds: *u32, lc_writer: anytype) !void {
}
fn filterDataInCode(
- dices: []const macho.data_in_code_entry,
+ dices: []align(1) const macho.data_in_code_entry,
start_addr: u64,
end_addr: u64,
-) []const macho.data_in_code_entry {
+) []align(1) const macho.data_in_code_entry {
const Predicate = struct {
addr: u64,
@@ -5825,7 +5825,7 @@ pub fn getEntryPoint(self: MachO) error{MissingMainEntrypoint}!SymbolWithLoc {
return global;
}
-pub fn findFirst(comptime T: type, haystack: []const T, start: usize, predicate: anytype) usize {
+pub fn findFirst(comptime T: type, haystack: []align(1) const T, start: usize, predicate: anytype) usize {
if (!@hasDecl(@TypeOf(predicate), "predicate"))
@compileError("Predicate is required to define fn predicate(@This(), T) bool");
@@ -5861,8 +5861,9 @@ pub fn generateSymbolStabs(
},
else => |e| return e,
};
- const tu_name = try compile_unit.die.getAttrString(&debug_info, dwarf.AT.name);
- const tu_comp_dir = try compile_unit.die.getAttrString(&debug_info, dwarf.AT.comp_dir);
+
+ const tu_name = try compile_unit.die.getAttrString(&debug_info, dwarf.AT.name, debug_info.debug_str, compile_unit.*);
+ const tu_comp_dir = try compile_unit.die.getAttrString(&debug_info, dwarf.AT.comp_dir, debug_info.debug_str, compile_unit.*);
// Open scope
try locals.ensureUnusedCapacity(3);
diff --git a/src/link/MachO/Atom.zig b/src/link/MachO/Atom.zig
index 4871276f3c..dd818ea936 100644
--- a/src/link/MachO/Atom.zig
+++ b/src/link/MachO/Atom.zig
@@ -218,7 +218,7 @@ const RelocContext = struct {
base_offset: i32 = 0,
};
-pub fn parseRelocs(self: *Atom, relocs: []const macho.relocation_info, context: RelocContext) !void {
+pub fn parseRelocs(self: *Atom, relocs: []align(1) const macho.relocation_info, context: RelocContext) !void {
const tracy = trace(@src());
defer tracy.end();
diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig
index 3bfe334302..c2aa562db5 100644
--- a/src/link/MachO/DebugSymbols.zig
+++ b/src/link/MachO/DebugSymbols.zig
@@ -63,17 +63,16 @@ pub const Reloc = struct {
pub fn populateMissingMetadata(self: *DebugSymbols, allocator: Allocator) !void {
if (self.linkedit_segment_cmd_index == null) {
self.linkedit_segment_cmd_index = @intCast(u8, self.segments.items.len);
- log.debug("found __LINKEDIT segment free space 0x{x} to 0x{x}", .{
- self.base.page_size,
- self.base.page_size * 2,
- });
+ const fileoff = @intCast(u64, self.base.page_size);
+ const needed_size = @intCast(u64, self.base.page_size) * 2;
+ log.debug("found __LINKEDIT segment free space 0x{x} to 0x{x}", .{ fileoff, needed_size });
// TODO this needs reworking
try self.segments.append(allocator, .{
.segname = makeStaticString("__LINKEDIT"),
- .vmaddr = self.base.page_size,
- .vmsize = self.base.page_size,
- .fileoff = self.base.page_size,
- .filesize = self.base.page_size,
+ .vmaddr = fileoff,
+ .vmsize = needed_size,
+ .fileoff = fileoff,
+ .filesize = needed_size,
.maxprot = macho.PROT.READ,
.initprot = macho.PROT.READ,
.cmdsize = @sizeOf(macho.segment_command_64),
@@ -284,6 +283,7 @@ pub fn flushModule(self: *DebugSymbols, allocator: Allocator, options: link.Opti
const lc_writer = lc_buffer.writer();
var ncmds: u32 = 0;
+ self.updateDwarfSegment();
try self.writeLinkeditSegmentData(&ncmds, lc_writer);
self.updateDwarfSegment();
diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig
index 996a85ed4b..d99cfae3b7 100644
--- a/src/link/MachO/Object.zig
+++ b/src/link/MachO/Object.zig
@@ -24,7 +24,7 @@ mtime: u64,
contents: []align(@alignOf(u64)) const u8,
header: macho.mach_header_64 = undefined,
-in_symtab: []const macho.nlist_64 = undefined,
+in_symtab: []align(1) const macho.nlist_64 = undefined,
in_strtab: []const u8 = undefined,
symtab: std.ArrayListUnmanaged(macho.nlist_64) = .{},
@@ -99,12 +99,13 @@ pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch)
},
.SYMTAB => {
const symtab = cmd.cast(macho.symtab_command).?;
+ // Sadly, SYMTAB may be at an unaligned offset within the object file.
self.in_symtab = @ptrCast(
- [*]const macho.nlist_64,
- @alignCast(@alignOf(macho.nlist_64), &self.contents[symtab.symoff]),
+ [*]align(1) const macho.nlist_64,
+ self.contents.ptr + symtab.symoff,
)[0..symtab.nsyms];
self.in_strtab = self.contents[symtab.stroff..][0..symtab.strsize];
- try self.symtab.appendSlice(allocator, self.in_symtab);
+ try self.symtab.appendUnalignedSlice(allocator, self.in_symtab);
},
else => {},
}
@@ -196,10 +197,10 @@ fn filterSymbolsByAddress(
}
fn filterRelocs(
- relocs: []const macho.relocation_info,
+ relocs: []align(1) const macho.relocation_info,
start_addr: u64,
end_addr: u64,
-) []const macho.relocation_info {
+) []align(1) const macho.relocation_info {
const Predicate = struct {
addr: u64,
@@ -303,8 +304,8 @@ pub fn splitIntoAtomsOneShot(self: *Object, macho_file: *MachO, object_id: u32)
// Read section's list of relocations
const relocs = @ptrCast(
- [*]const macho.relocation_info,
- @alignCast(@alignOf(macho.relocation_info), &self.contents[sect.reloff]),
+ [*]align(1) const macho.relocation_info,
+ self.contents.ptr + sect.reloff,
)[0..sect.nreloc];
// Symbols within this section only.
@@ -390,7 +391,7 @@ pub fn splitIntoAtomsOneShot(self: *Object, macho_file: *MachO, object_id: u32)
break :blk cc[start..][0..size];
} else null;
const atom_align = if (addr > 0)
- math.min(@ctz(u64, addr), sect.@"align")
+ math.min(@ctz(addr), sect.@"align")
else
sect.@"align";
const atom = try self.createAtomFromSubsection(
@@ -472,7 +473,7 @@ fn createAtomFromSubsection(
size: u64,
alignment: u32,
code: ?[]const u8,
- relocs: []const macho.relocation_info,
+ relocs: []align(1) const macho.relocation_info,
indexes: []const SymbolAtIndex,
match: u8,
sect: macho.section_64,
@@ -538,7 +539,7 @@ pub fn getSourceSection(self: Object, index: u16) macho.section_64 {
return self.sections.items[index];
}
-pub fn parseDataInCode(self: Object) ?[]const macho.data_in_code_entry {
+pub fn parseDataInCode(self: Object) ?[]align(1) const macho.data_in_code_entry {
var it = LoadCommandIterator{
.ncmds = self.header.ncmds,
.buffer = self.contents[@sizeOf(macho.mach_header_64)..][0..self.header.sizeofcmds],
@@ -549,8 +550,8 @@ pub fn parseDataInCode(self: Object) ?[]const macho.data_in_code_entry {
const dice = cmd.cast(macho.linkedit_data_command).?;
const ndice = @divExact(dice.datasize, @sizeOf(macho.data_in_code_entry));
return @ptrCast(
- [*]const macho.data_in_code_entry,
- @alignCast(@alignOf(macho.data_in_code_entry), &self.contents[dice.dataoff]),
+ [*]align(1) const macho.data_in_code_entry,
+ self.contents.ptr + dice.dataoff,
)[0..ndice];
},
else => {},
@@ -579,9 +580,15 @@ pub fn parseDwarfInfo(self: Object) error{Overflow}!dwarf.DwarfInfo {
.debug_info = &[0]u8{},
.debug_abbrev = &[0]u8{},
.debug_str = &[0]u8{},
+ .debug_str_offsets = &[0]u8{},
.debug_line = &[0]u8{},
.debug_line_str = &[0]u8{},
.debug_ranges = &[0]u8{},
+ .debug_loclists = &[0]u8{},
+ .debug_rnglists = &[0]u8{},
+ .debug_addr = &[0]u8{},
+ .debug_names = &[0]u8{},
+ .debug_frame = &[0]u8{},
};
for (self.sections.items) |sect| {
const segname = sect.segName();
@@ -593,12 +600,24 @@ pub fn parseDwarfInfo(self: Object) error{Overflow}!dwarf.DwarfInfo {
di.debug_abbrev = try self.getSectionContents(sect);
} else if (mem.eql(u8, sectname, "__debug_str")) {
di.debug_str = try self.getSectionContents(sect);
+ } else if (mem.eql(u8, sectname, "__debug_str_offsets")) {
+ di.debug_str_offsets = try self.getSectionContents(sect);
} else if (mem.eql(u8, sectname, "__debug_line")) {
di.debug_line = try self.getSectionContents(sect);
} else if (mem.eql(u8, sectname, "__debug_line_str")) {
di.debug_line_str = try self.getSectionContents(sect);
} else if (mem.eql(u8, sectname, "__debug_ranges")) {
di.debug_ranges = try self.getSectionContents(sect);
+ } else if (mem.eql(u8, sectname, "__debug_loclists")) {
+ di.debug_loclists = try self.getSectionContents(sect);
+ } else if (mem.eql(u8, sectname, "__debug_rnglists")) {
+ di.debug_rnglists = try self.getSectionContents(sect);
+ } else if (mem.eql(u8, sectname, "__debug_addr")) {
+ di.debug_addr = try self.getSectionContents(sect);
+ } else if (mem.eql(u8, sectname, "__debug_names")) {
+ di.debug_names = try self.getSectionContents(sect);
+ } else if (mem.eql(u8, sectname, "__debug_frame")) {
+ di.debug_frame = try self.getSectionContents(sect);
}
}
}
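The recurring []align(1) changes above all apply one reinterpretation pattern; a minimal sketch of it, using the two-argument @ptrCast form this branch of the compiler accepts:

    const std = @import("std");
    const macho = std.macho;

    // SYMTAB (like relocations and data-in-code) may sit at any byte
    // offset, so the element pointer is declared align(1) rather than
    // asserted into natural alignment with @alignCast.
    fn viewSymtab(contents: []const u8, symoff: u32, nsyms: u32) []align(1) const macho.nlist_64 {
        return @ptrCast([*]align(1) const macho.nlist_64, contents.ptr + symoff)[0..nsyms];
    }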
diff --git a/src/link/NvPtx.zig b/src/link/NvPtx.zig
index bd86d87201..7bf51c7ad3 100644
--- a/src/link/NvPtx.zig
+++ b/src/link/NvPtx.zig
@@ -57,7 +57,7 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*NvPtx {
pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*NvPtx {
if (!build_options.have_llvm) @panic("nvptx target requires a zig compiler with llvm enabled.");
if (!options.use_llvm) return error.PtxArchNotSupported;
- assert(options.object_format == .nvptx);
+ assert(options.target.ofmt == .nvptx);
const nvptx = try createEmpty(allocator, options);
log.info("Opening .ptx target file {s}", .{sub_path});
diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig
index 16f7841c2d..9a8927dcb4 100644
--- a/src/link/Plan9.zig
+++ b/src/link/Plan9.zig
@@ -657,7 +657,7 @@ pub const base_tag = .plan9;
pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*Plan9 {
if (options.use_llvm)
return error.LLVMBackendDoesNotSupportPlan9;
- assert(options.object_format == .plan9);
+ assert(options.target.ofmt == .plan9);
const self = try createEmpty(allocator, options);
errdefer self.base.destroy();
diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig
index e295dceb55..b2f6edddfb 100644
--- a/src/link/SpirV.zig
+++ b/src/link/SpirV.zig
@@ -99,7 +99,7 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*SpirV {
}
pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*SpirV {
- assert(options.object_format == .spirv);
+ assert(options.target.ofmt == .spirv);
if (options.use_llvm) return error.LLVM_BackendIsTODO_ForSpirV; // TODO: LLVM Doesn't support SpirV at all.
if (options.use_lld) return error.LLD_LinkingIsTODO_ForSpirV; // TODO: LLD Doesn't support SpirV at all.
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index 1b5d9b3197..df4ac11635 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -282,7 +282,7 @@ pub const StringTable = struct {
};
pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*Wasm {
- assert(options.object_format == .wasm);
+ assert(options.target.ofmt == .wasm);
if (build_options.have_llvm and options.use_llvm) {
return createEmpty(allocator, options);
@@ -356,7 +356,7 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*Wasm {
}
const use_llvm = build_options.have_llvm and options.use_llvm;
- const use_stage1 = build_options.is_stage1 and options.use_stage1;
+ const use_stage1 = build_options.have_stage1 and options.use_stage1;
if (use_llvm and !use_stage1) {
self.llvm_object = try LlvmObject.create(gpa, options);
}
@@ -378,7 +378,7 @@ fn parseObjectFile(self: *Wasm, path: []const u8) !bool {
const file = try fs.cwd().openFile(path, .{});
errdefer file.close();
- var object = Object.create(self.base.allocator, file, path) catch |err| switch (err) {
+ var object = Object.create(self.base.allocator, file, path, null) catch |err| switch (err) {
error.InvalidMagicByte, error.NotObjectFile => return false,
else => |e| return e,
};
@@ -463,8 +463,6 @@ fn resolveSymbolsInObject(self: *Wasm, object_index: u16) !void {
continue;
}
- // TODO: Store undefined symbols so we can verify at the end if they've all been found
- // if not, emit an error (unless --allow-undefined is enabled).
const maybe_existing = try self.globals.getOrPut(self.base.allocator, sym_name_index);
if (!maybe_existing.found_existing) {
maybe_existing.value_ptr.* = location;
@@ -483,8 +481,15 @@ fn resolveSymbolsInObject(self: *Wasm, object_index: u16) !void {
break :blk self.objects.items[file].name;
} else self.name;
- if (!existing_sym.isUndefined()) {
- if (!symbol.isUndefined()) {
+ if (!existing_sym.isUndefined()) outer: {
+ if (!symbol.isUndefined()) inner: {
+ if (symbol.isWeak()) {
+ break :inner; // ignore the new symbol (discard it)
+ }
+ if (existing_sym.isWeak()) {
+ break :outer; // the existing symbol is weak while the new one isn't; replace it.
+ }
+ // both are defined and neither is weak: we have a symbol collision.
log.err("symbol '{s}' defined multiple times", .{sym_name});
log.err(" first definition in '{s}'", .{existing_file_path});
log.err(" next definition in '{s}'", .{object.name});
@@ -502,6 +507,53 @@ fn resolveSymbolsInObject(self: *Wasm, object_index: u16) !void {
return error.SymbolMismatchingType;
}
+ if (existing_sym.isUndefined() and symbol.isUndefined()) {
+ const existing_name = if (existing_loc.file) |file_index| blk: {
+ const obj = self.objects.items[file_index];
+ const name_index = obj.findImport(symbol.tag.externalType(), existing_sym.index).module_name;
+ break :blk obj.string_table.get(name_index);
+ } else blk: {
+ const name_index = self.imports.get(existing_loc).?.module_name;
+ break :blk self.string_table.get(name_index);
+ };
+
+ const module_index = object.findImport(symbol.tag.externalType(), symbol.index).module_name;
+ const module_name = object.string_table.get(module_index);
+ if (!mem.eql(u8, existing_name, module_name)) {
+ log.err("symbol '{s}' module name mismatch. Expected '{s}', but found '{s}'", .{
+ sym_name,
+ existing_name,
+ module_name,
+ });
+ log.err(" first definition in '{s}'", .{existing_file_path});
+ log.err(" next definition in '{s}'", .{object.name});
+ return error.ModuleNameMismatch;
+ }
+ }
+
+ if (existing_sym.tag == .global) {
+ const existing_ty = self.getGlobalType(existing_loc);
+ const new_ty = self.getGlobalType(location);
+ if (existing_ty.mutable != new_ty.mutable or existing_ty.valtype != new_ty.valtype) {
+ log.err("symbol '{s}' mismatching global types", .{sym_name});
+ log.err(" first definition in '{s}'", .{existing_file_path});
+ log.err(" next definition in '{s}'", .{object.name});
+ return error.GlobalTypeMismatch;
+ }
+ }
+
+ if (existing_sym.tag == .function) {
+ const existing_ty = self.getFunctionSignature(existing_loc);
+ const new_ty = self.getFunctionSignature(location);
+ if (!existing_ty.eql(new_ty)) {
+ log.err("symbol '{s}' mismatching function signatures.", .{sym_name});
+ log.err(" expected signature {}, but found signature {}", .{ existing_ty, new_ty });
+ log.err(" first definition in '{s}'", .{existing_file_path});
+ log.err(" next definition in '{s}'", .{object.name});
+ return error.FunctionSignatureMismatch;
+ }
+ }
+
// when both symbols are weak, we skip overwriting
if (existing_sym.isWeak() and symbol.isWeak()) {
try self.discarded.put(self.base.allocator, location, existing_loc);
@@ -543,8 +595,8 @@ fn resolveSymbolsInArchives(self: *Wasm) !void {
// Parse object and resolve symbols again before we check remaining
// undefined symbols.
const object_file_index = @intCast(u16, self.objects.items.len);
- const object = try self.objects.addOne(self.base.allocator);
- object.* = try archive.parseObject(self.base.allocator, offset.items[0]);
+ var object = try archive.parseObject(self.base.allocator, offset.items[0]);
+ try self.objects.append(self.base.allocator, object);
try self.resolveSymbolsInObject(object_file_index);
// continue loop for any remaining undefined symbols that still exist
@@ -797,6 +849,49 @@ fn finishUpdateDecl(self: *Wasm, decl: *Module.Decl, code: []const u8) !void {
try self.resolved_symbols.put(self.base.allocator, atom.symbolLoc(), {});
}
+/// From a given symbol location, returns its `wasm.GlobalType`.
+/// Asserts the Symbol represents a global.
+fn getGlobalType(self: *const Wasm, loc: SymbolLoc) wasm.GlobalType {
+ const symbol = loc.getSymbol(self);
+ assert(symbol.tag == .global);
+ const is_undefined = symbol.isUndefined();
+ if (loc.file) |file_index| {
+ const obj: Object = self.objects.items[file_index];
+ if (is_undefined) {
+ return obj.findImport(.global, symbol.index).kind.global;
+ }
+ const import_global_count = obj.importedCountByKind(.global);
+ return obj.globals[symbol.index - import_global_count].global_type;
+ }
+ if (is_undefined) {
+ return self.imports.get(loc).?.kind.global;
+ }
+ return self.wasm_globals.items[symbol.index].global_type;
+}
+
+/// From a given symbol location, returns its `wasm.Type`.
+/// Asserts the Symbol represents a function.
+fn getFunctionSignature(self: *const Wasm, loc: SymbolLoc) wasm.Type {
+ const symbol = loc.getSymbol(self);
+ assert(symbol.tag == .function);
+ const is_undefined = symbol.isUndefined();
+ if (loc.file) |file_index| {
+ const obj: Object = self.objects.items[file_index];
+ if (is_undefined) {
+ const ty_index = obj.findImport(.function, symbol.index).kind.function;
+ return obj.func_types[ty_index];
+ }
+ const import_function_count = obj.importedCountByKind(.function);
+ const type_index = obj.functions[symbol.index - import_function_count].type_index;
+ return obj.func_types[type_index];
+ }
+ if (is_undefined) {
+ const ty_index = self.imports.get(loc).?.kind.function;
+ return self.func_types.items[ty_index];
+ }
+ return self.func_types.items[self.functions.get(.{ .file = loc.file, .index = loc.index }).?.type_index];
+}
+
/// Lowers a constant typed value to a local symbol and atom.
/// Returns the symbol index of the local
/// The given `decl` is the parent decl whom owns the constant.
@@ -2501,7 +2596,7 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) !
// If there is no Zig code to compile, then we should skip flushing the output file because it
// will not be part of the linker line anyway.
const module_obj_path: ?[]const u8 = if (self.base.options.module) |mod| blk: {
- const use_stage1 = build_options.is_stage1 and self.base.options.use_stage1;
+ const use_stage1 = build_options.have_stage1 and self.base.options.use_stage1;
if (use_stage1) {
const obj_basename = try std.zig.binNameAlloc(arena, .{
.root_name = self.base.options.root_name,
@@ -2711,7 +2806,7 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) !
if (self.base.options.module) |mod| {
// when we use stage1, we use the exports that stage1 provided us.
// For stage2, we can directly retrieve them from the module.
- const use_stage1 = build_options.is_stage1 and self.base.options.use_stage1;
+ const use_stage1 = build_options.have_stage1 and self.base.options.use_stage1;
if (use_stage1) {
for (comp.export_symbol_names.items) |symbol_name| {
try argv.append(try std.fmt.allocPrint(arena, "--export={s}", .{symbol_name}));
@@ -3040,12 +3135,12 @@ fn emitSegmentInfo(self: *Wasm, file: fs.File, arena: Allocator) !void {
for (self.segment_info.items) |segment_info| {
log.debug("Emit segment: {s} align({d}) flags({b})", .{
segment_info.name,
- @ctz(u32, segment_info.alignment),
+ @ctz(segment_info.alignment),
segment_info.flags,
});
try leb.writeULEB128(writer, @intCast(u32, segment_info.name.len));
try writer.writeAll(segment_info.name);
- try leb.writeULEB128(writer, @ctz(u32, segment_info.alignment));
+ try leb.writeULEB128(writer, @ctz(segment_info.alignment));
try leb.writeULEB128(writer, segment_info.flags);
}
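The defined-vs-defined branch added to resolveSymbolsInObject reduces to a small precedence rule; a sketch with hypothetical names:

    const Resolution = enum { keep_existing, take_new, collision };

    // A weak definition always loses to a strong one; two strong
    // definitions of the same name are a hard error.
    fn resolveDefined(existing_weak: bool, new_weak: bool) Resolution {
        if (new_weak) return .keep_existing;
        if (existing_weak) return .take_new;
        return .collision;
    }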
diff --git a/src/link/Wasm/Archive.zig b/src/link/Wasm/Archive.zig
index e214d1b124..c80d26d17d 100644
--- a/src/link/Wasm/Archive.zig
+++ b/src/link/Wasm/Archive.zig
@@ -15,6 +15,12 @@ name: []const u8,
header: ar_hdr = undefined,
+/// A list of long file names, delimited by a LF character (0x0a).
+/// This is stored as a single slice of bytes, as each header's name
+/// field stores the byte offset of a file name within this table,
+/// rather than an index into a list.
+long_file_names: []const u8 = undefined,
+
/// Parsed table of contents.
/// Each symbol name points to a list of all definition
/// sites within the current static archive.
@@ -53,32 +59,33 @@ const ar_hdr = extern struct {
/// Always contains ARFMAG.
ar_fmag: [2]u8,
- const NameOrLength = union(enum) {
- Name: []const u8,
- Length: u32,
+ const NameOrIndex = union(enum) {
+ name: []const u8,
+ index: u32,
};
- fn nameOrLength(self: ar_hdr) !NameOrLength {
- const value = getValue(&self.ar_name);
+
+ fn nameOrIndex(archive: ar_hdr) !NameOrIndex {
+ const value = getValue(&archive.ar_name);
const slash_index = mem.indexOfScalar(u8, value, '/') orelse return error.MalformedArchive;
const len = value.len;
if (slash_index == len - 1) {
// Name stored directly
- return NameOrLength{ .Name = value };
+ return NameOrIndex{ .name = value };
} else {
// Name follows the header directly and its length is encoded in
// the name field.
- const length = try std.fmt.parseInt(u32, value[slash_index + 1 ..], 10);
- return NameOrLength{ .Length = length };
+ const index = try std.fmt.parseInt(u32, value[slash_index + 1 ..], 10);
+ return NameOrIndex{ .index = index };
}
}
- fn date(self: ar_hdr) !u64 {
- const value = getValue(&self.ar_date);
+ fn date(archive: ar_hdr) !u64 {
+ const value = getValue(&archive.ar_date);
return std.fmt.parseInt(u64, value, 10);
}
- fn size(self: ar_hdr) !u32 {
- const value = getValue(&self.ar_size);
+ fn size(archive: ar_hdr) !u32 {
+ const value = getValue(&archive.ar_size);
return std.fmt.parseInt(u32, value, 10);
}
@@ -87,18 +94,19 @@ const ar_hdr = extern struct {
}
};
-pub fn deinit(self: *Archive, allocator: Allocator) void {
- for (self.toc.keys()) |*key| {
+pub fn deinit(archive: *Archive, allocator: Allocator) void {
+ for (archive.toc.keys()) |*key| {
allocator.free(key.*);
}
- for (self.toc.values()) |*value| {
+ for (archive.toc.values()) |*value| {
value.deinit(allocator);
}
- self.toc.deinit(allocator);
+ archive.toc.deinit(allocator);
+ allocator.free(archive.long_file_names);
}
-pub fn parse(self: *Archive, allocator: Allocator) !void {
- const reader = self.file.reader();
+pub fn parse(archive: *Archive, allocator: Allocator) !void {
+ const reader = archive.file.reader();
const magic = try reader.readBytesNoEof(SARMAG);
if (!mem.eql(u8, &magic, ARMAG)) {
@@ -106,38 +114,31 @@ pub fn parse(self: *Archive, allocator: Allocator) !void {
return error.NotArchive;
}
- self.header = try reader.readStruct(ar_hdr);
- if (!mem.eql(u8, &self.header.ar_fmag, ARFMAG)) {
- log.debug("invalid header delimiter: expected '{s}', found '{s}'", .{ ARFMAG, self.header.ar_fmag });
+ archive.header = try reader.readStruct(ar_hdr);
+ if (!mem.eql(u8, &archive.header.ar_fmag, ARFMAG)) {
+ log.debug("invalid header delimiter: expected '{s}', found '{s}'", .{ ARFMAG, archive.header.ar_fmag });
return error.NotArchive;
}
- try self.parseTableOfContents(allocator, reader);
+ try archive.parseTableOfContents(allocator, reader);
+ try archive.parseNameTable(allocator, reader);
}
-fn parseName(allocator: Allocator, header: ar_hdr, reader: anytype) ![]u8 {
- const name_or_length = try header.nameOrLength();
- var name: []u8 = undefined;
- switch (name_or_length) {
- .Name => |n| {
- name = try allocator.dupe(u8, n);
- },
- .Length => |len| {
- var n = try allocator.alloc(u8, len);
- defer allocator.free(n);
- try reader.readNoEof(n);
- const actual_len = mem.indexOfScalar(u8, n, @as(u8, 0)) orelse n.len;
- name = try allocator.dupe(u8, n[0..actual_len]);
+fn parseName(archive: *const Archive, header: ar_hdr) ![]const u8 {
+ const name_or_index = try header.nameOrIndex();
+ switch (name_or_index) {
+ .name => |name| return name,
+ .index => |index| {
+ const name = mem.sliceTo(archive.long_file_names[index..], 0x0a);
+ return mem.trimRight(u8, name, "/");
},
}
- return name;
}
-fn parseTableOfContents(self: *Archive, allocator: Allocator, reader: anytype) !void {
- log.debug("parsing table of contents for archive file '{s}'", .{self.name});
+fn parseTableOfContents(archive: *Archive, allocator: Allocator, reader: anytype) !void {
// size field can have extra spaces padded in front as well as the end,
// so we trim those first before parsing the ASCII value.
- const size_trimmed = std.mem.trim(u8, &self.header.ar_size, " ");
+ const size_trimmed = mem.trim(u8, &archive.header.ar_size, " ");
const sym_tab_size = try std.fmt.parseInt(u32, size_trimmed, 10);
const num_symbols = try reader.readIntBig(u32);
@@ -157,7 +158,7 @@ fn parseTableOfContents(self: *Archive, allocator: Allocator, reader: anytype) !
var i: usize = 0;
while (i < sym_tab.len) {
- const string = std.mem.sliceTo(sym_tab[i..], 0);
+ const string = mem.sliceTo(sym_tab[i..], 0);
if (string.len == 0) {
i += 1;
continue;
@@ -165,7 +166,7 @@ fn parseTableOfContents(self: *Archive, allocator: Allocator, reader: anytype) !
i += string.len;
const name = try allocator.dupe(u8, string);
errdefer allocator.free(name);
- const gop = try self.toc.getOrPut(allocator, name);
+ const gop = try archive.toc.getOrPut(allocator, name);
if (gop.found_existing) {
allocator.free(name);
} else {
@@ -175,33 +176,49 @@ fn parseTableOfContents(self: *Archive, allocator: Allocator, reader: anytype) !
}
}
+fn parseNameTable(archive: *Archive, allocator: Allocator, reader: anytype) !void {
+ const header: ar_hdr = try reader.readStruct(ar_hdr);
+ if (!mem.eql(u8, &header.ar_fmag, ARFMAG)) {
+ log.err("invalid header delimiter: expected '{s}', found '{s}'", .{ ARFMAG, header.ar_fmag });
+ return error.MalformedArchive;
+ }
+ if (!mem.eql(u8, header.ar_name[0..2], "//")) {
+ log.err("invalid archive. Long name table missing", .{});
+ return error.MalformedArchive;
+ }
+ const table_size = try header.size();
+ const long_file_names = try allocator.alloc(u8, table_size);
+ errdefer allocator.free(long_file_names);
+ try reader.readNoEof(long_file_names);
+ archive.long_file_names = long_file_names;
+}
+
/// From a given file offset, starts reading for a file header.
/// When found, parses the object file into an `Object` and returns it.
-pub fn parseObject(self: Archive, allocator: Allocator, file_offset: u32) !Object {
- try self.file.seekTo(file_offset);
- const reader = self.file.reader();
+pub fn parseObject(archive: Archive, allocator: Allocator, file_offset: u32) !Object {
+ try archive.file.seekTo(file_offset);
+ const reader = archive.file.reader();
const header = try reader.readStruct(ar_hdr);
- const current_offset = try self.file.getPos();
- try self.file.seekTo(0);
+ const current_offset = try archive.file.getPos();
+ try archive.file.seekTo(0);
if (!mem.eql(u8, &header.ar_fmag, ARFMAG)) {
log.err("invalid header delimiter: expected '{s}', found '{s}'", .{ ARFMAG, header.ar_fmag });
return error.MalformedArchive;
}
- const object_name = try parseName(allocator, header, reader);
- defer allocator.free(object_name);
-
+ const object_name = try archive.parseName(header);
const name = name: {
var buffer: [std.fs.MAX_PATH_BYTES]u8 = undefined;
- const path = try std.os.realpath(self.name, &buffer);
+ const path = try std.os.realpath(archive.name, &buffer);
break :name try std.fmt.allocPrint(allocator, "{s}({s})", .{ path, object_name });
};
defer allocator.free(name);
- const object_file = try std.fs.cwd().openFile(self.name, .{});
+ const object_file = try std.fs.cwd().openFile(archive.name, .{});
errdefer object_file.close();
+ const object_file_size = try header.size();
try object_file.seekTo(current_offset);
- return Object.create(allocator, object_file, name);
+ return Object.create(allocator, object_file, name, object_file_size);
}
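parseName above now resolves GNU-style long names through the "//" member; a minimal sketch of the lookup, assuming long_file_names was filled in by parseNameTable:

    const std = @import("std");

    // Headers store "/<offset>" for long names: <offset> is a byte index
    // into the LF-delimited table, and trailing '/' padding is trimmed.
    fn resolveLongName(long_file_names: []const u8, offset: u32) []const u8 {
        const name = std.mem.sliceTo(long_file_names[offset..], 0x0a);
        return std.mem.trimRight(u8, name, "/");
    }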
diff --git a/src/link/Wasm/Object.zig b/src/link/Wasm/Object.zig
index a1308ec045..50827ca9fb 100644
--- a/src/link/Wasm/Object.zig
+++ b/src/link/Wasm/Object.zig
@@ -105,14 +105,33 @@ pub const InitError = error{NotObjectFile} || ParseError || std.fs.File.ReadErro
/// Initializes a new `Object` from a wasm object file.
/// This also parses and verifies the object file.
-pub fn create(gpa: Allocator, file: std.fs.File, name: []const u8) InitError!Object {
+/// When a max size is given, only that many bytes are read and parsed;
+/// otherwise the file is read until the end.
+pub fn create(gpa: Allocator, file: std.fs.File, name: []const u8, maybe_max_size: ?usize) InitError!Object {
var object: Object = .{
.file = file,
.name = try gpa.dupe(u8, name),
};
var is_object_file: bool = false;
- try object.parse(gpa, file.reader(), &is_object_file);
+ const size = maybe_max_size orelse size: {
+ errdefer gpa.free(object.name);
+ const stat = try file.stat();
+ break :size @intCast(usize, stat.size);
+ };
+
+ const file_contents = try gpa.alloc(u8, size);
+ defer gpa.free(file_contents);
+ var file_reader = file.reader();
+ var read: usize = 0;
+ while (read < size) {
+ const n = try file_reader.read(file_contents[read..]);
+ std.debug.assert(n != 0);
+ read += n;
+ }
+ var fbs = std.io.fixedBufferStream(file_contents);
+
+ try object.parse(gpa, fbs.reader(), &is_object_file);
errdefer object.deinit(gpa);
if (!is_object_file) return error.NotObjectFile;
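The bounded read loop above exists so an archive member stops at the size recorded in its ar header instead of reading to end of file; the same loop factored out as a sketch:

    const std = @import("std");

    // Reads exactly buf.len bytes; a zero-byte read would mean the file
    // ended before the recorded size, which the assert makes loud.
    fn readExact(reader: anytype, buf: []u8) !void {
        var read: usize = 0;
        while (read < buf.len) {
            const n = try reader.read(buf[read..]);
            std.debug.assert(n != 0);
            read += n;
        }
    }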
diff --git a/src/main.zig b/src/main.zig
index f192137b3c..dd3a7e797b 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -378,6 +378,8 @@ const usage_build_generic =
\\ -fno-lto Force-disable Link Time Optimization
\\ -fstack-check Enable stack probing in unsafe builds
\\ -fno-stack-check Disable stack probing in safe builds
+ \\ -fstack-protector Enable stack protection in unsafe builds
+ \\ -fno-stack-protector Disable stack protection in safe builds
\\ -fsanitize-c Enable C undefined behavior detection in unsafe builds
\\ -fno-sanitize-c Disable C undefined behavior detection in safe builds
\\ -fvalgrind Include valgrind client requests in release builds
@@ -668,6 +670,7 @@ fn buildOutputType(
var want_unwind_tables: ?bool = null;
var want_sanitize_c: ?bool = null;
var want_stack_check: ?bool = null;
+ var want_stack_protector: ?u32 = null;
var want_red_zone: ?bool = null;
var omit_frame_pointer: ?bool = null;
var want_valgrind: ?bool = null;
@@ -718,7 +721,7 @@ fn buildOutputType(
var test_filter: ?[]const u8 = null;
var test_name_prefix: ?[]const u8 = null;
var override_local_cache_dir: ?[]const u8 = try optionalStringEnvVar(arena, "ZIG_LOCAL_CACHE_DIR");
- var override_global_cache_dir: ?[]const u8 = null;
+ var override_global_cache_dir: ?[]const u8 = try optionalStringEnvVar(arena, "ZIG_GLOBAL_CACHE_DIR");
var override_lib_dir: ?[]const u8 = try optionalStringEnvVar(arena, "ZIG_LIB_DIR");
var main_pkg_path: ?[]const u8 = null;
var clang_preprocessor_mode: Compilation.ClangPreprocessorMode = .no;
@@ -1168,6 +1171,10 @@ fn buildOutputType(
want_stack_check = true;
} else if (mem.eql(u8, arg, "-fno-stack-check")) {
want_stack_check = false;
+ } else if (mem.eql(u8, arg, "-fstack-protector")) {
+ want_stack_protector = Compilation.default_stack_protector_buffer_size;
+ } else if (mem.eql(u8, arg, "-fno-stack-protector")) {
+ want_stack_protector = 0;
} else if (mem.eql(u8, arg, "-mred-zone")) {
want_red_zone = true;
} else if (mem.eql(u8, arg, "-mno-red-zone")) {
@@ -1521,6 +1528,12 @@ fn buildOutputType(
.no_color_diagnostics => color = .off,
.stack_check => want_stack_check = true,
.no_stack_check => want_stack_check = false,
+ .stack_protector => {
+ if (want_stack_protector == null) {
+ want_stack_protector = Compilation.default_stack_protector_buffer_size;
+ }
+ },
+ .no_stack_protector => want_stack_protector = 0,
.unwind_tables => want_unwind_tables = true,
.no_unwind_tables => want_unwind_tables = false,
.nostdlib => ensure_libc_on_non_freestanding = false,
@@ -1657,7 +1670,8 @@ fn buildOutputType(
disable_c_depfile = true;
try clang_argv.appendSlice(it.other_args);
},
- .dep_file_mm => { // -MM
+ .dep_file_to_stdout => { // -M, -MM
+ // "Like -MD, but also implies -E and writes to stdout by default"
// "Like -MMD, but also implies -E and writes to stdout by default"
c_out_mode = .preprocessor;
disable_c_depfile = true;
@@ -2191,6 +2205,7 @@ fn buildOutputType(
.arch_os_abi = target_arch_os_abi,
.cpu_features = target_mcpu,
.dynamic_linker = target_dynamic_linker,
+ .object_format = target_ofmt,
};
// Before passing the mcpu string in for parsing, we convert any -m flags that were
@@ -2493,28 +2508,7 @@ fn buildOutputType(
}
}
- const object_format: std.Target.ObjectFormat = blk: {
- const ofmt = target_ofmt orelse break :blk target_info.target.getObjectFormat();
- if (mem.eql(u8, ofmt, "elf")) {
- break :blk .elf;
- } else if (mem.eql(u8, ofmt, "c")) {
- break :blk .c;
- } else if (mem.eql(u8, ofmt, "coff")) {
- break :blk .coff;
- } else if (mem.eql(u8, ofmt, "macho")) {
- break :blk .macho;
- } else if (mem.eql(u8, ofmt, "wasm")) {
- break :blk .wasm;
- } else if (mem.eql(u8, ofmt, "hex")) {
- break :blk .hex;
- } else if (mem.eql(u8, ofmt, "raw")) {
- break :blk .raw;
- } else if (mem.eql(u8, ofmt, "spirv")) {
- break :blk .spirv;
- } else {
- fatal("unsupported object format: {s}", .{ofmt});
- }
- };
+ const object_format = target_info.target.ofmt;
if (output_mode == .Obj and (object_format == .coff or object_format == .macho)) {
const total_obj_count = c_source_files.items.len +
@@ -2568,7 +2562,6 @@ fn buildOutputType(
.target = target_info.target,
.output_mode = output_mode,
.link_mode = link_mode,
- .object_format = object_format,
.version = optional_version,
}),
},
@@ -2858,7 +2851,6 @@ fn buildOutputType(
.emit_implib = emit_implib_resolved.data,
.link_mode = link_mode,
.dll_export_fns = dll_export_fns,
- .object_format = object_format,
.optimize_mode = optimize_mode,
.keep_source_files_loaded = false,
.clang_argv = clang_argv.items,
@@ -2880,6 +2872,7 @@ fn buildOutputType(
.want_unwind_tables = want_unwind_tables,
.want_sanitize_c = want_sanitize_c,
.want_stack_check = want_stack_check,
+ .want_stack_protector = want_stack_protector,
.want_red_zone = want_red_zone,
.omit_frame_pointer = omit_frame_pointer,
.want_valgrind = want_valgrind,
@@ -2996,7 +2989,7 @@ fn buildOutputType(
return std.io.getStdOut().writeAll(try comp.generateBuiltinZigSource(arena));
}
if (arg_mode == .translate_c) {
- const stage1_mode = use_stage1 orelse build_options.is_stage1;
+ const stage1_mode = use_stage1 orelse false;
return cmdTranslateC(comp, arena, have_enable_cache, stage1_mode);
}
@@ -3172,11 +3165,11 @@ fn parseCrossTargetOrReportFatalError(
for (diags.arch.?.allCpuModels()) |cpu| {
help_text.writer().print(" {s}\n", .{cpu.name}) catch break :help;
}
- std.log.info("Available CPUs for architecture '{s}':\n{s}", .{
+ std.log.info("available CPUs for architecture '{s}':\n{s}", .{
@tagName(diags.arch.?), help_text.items,
});
}
- fatal("Unknown CPU: '{s}'", .{diags.cpu_name.?});
+ fatal("unknown CPU: '{s}'", .{diags.cpu_name.?});
},
error.UnknownCpuFeature => {
help: {
@@ -3185,11 +3178,26 @@ fn parseCrossTargetOrReportFatalError(
for (diags.arch.?.allFeaturesList()) |feature| {
help_text.writer().print(" {s}: {s}\n", .{ feature.name, feature.description }) catch break :help;
}
- std.log.info("Available CPU features for architecture '{s}':\n{s}", .{
+ std.log.info("available CPU features for architecture '{s}':\n{s}", .{
@tagName(diags.arch.?), help_text.items,
});
}
- fatal("Unknown CPU feature: '{s}'", .{diags.unknown_feature_name.?});
+ fatal("unknown CPU feature: '{s}'", .{diags.unknown_feature_name.?});
+ },
+ error.UnknownObjectFormat => {
+ {
+ var help_text = std.ArrayList(u8).init(allocator);
+ defer help_text.deinit();
+ inline for (@typeInfo(std.Target.ObjectFormat).Enum.fields) |field| {
+ help_text.writer().print(" {s}\n", .{field.name}) catch
+ // TODO change this back to `break :help`
+ // this is working around a stage1 bug.
+ //break :help;
+ @panic("out of memory");
+ }
+ std.log.info("available object formats:\n{s}", .{help_text.items});
+ }
+ fatal("unknown object format: '{s}'", .{opts.object_format.?});
},
else => |e| return e,
};
@@ -3359,7 +3367,7 @@ fn updateModule(gpa: Allocator, comp: *Compilation, hook: AfterUpdateHook) !void
// If a .pdb file is part of the expected output, we must also copy
// it into place here.
- const is_coff = comp.bin_file.options.object_format == .coff;
+ const is_coff = comp.bin_file.options.target.ofmt == .coff;
const have_pdb = is_coff and !comp.bin_file.options.strip;
if (have_pdb) {
// Replace `.out` or `.exe` with `.pdb` on both the source and destination
@@ -4226,6 +4234,7 @@ const FmtError = error{
NotOpenForWriting,
UnsupportedEncoding,
ConnectionResetByPeer,
+ LockViolation,
} || fs.File.OpenError;
fn fmtPath(fmt: *Fmt, file_path: []const u8, check_mode: bool, dir: fs.Dir, sub_path: []const u8) FmtError!void {
@@ -4652,7 +4661,7 @@ pub const ClangArgIterator = struct {
lib_dir,
mcpu,
dep_file,
- dep_file_mm,
+ dep_file_to_stdout,
framework_dir,
framework,
nostdlibinc,
@@ -4668,6 +4677,8 @@ pub const ClangArgIterator = struct {
no_color_diagnostics,
stack_check,
no_stack_check,
+ stack_protector,
+ no_stack_protector,
strip,
exec_model,
emit_llvm,
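Earlier in this file's diff, ZIG_GLOBAL_CACHE_DIR gains the same environment-variable fallback already used for ZIG_LOCAL_CACHE_DIR and ZIG_LIB_DIR; a hedged sketch of that pattern (helper name hypothetical):

    const std = @import("std");

    // Returns null when the variable is unset so the caller can fall back
    // to its default cache-directory resolution.
    fn optionalEnv(arena: std.mem.Allocator, name: []const u8) !?[]const u8 {
        return std.process.getEnvVarOwned(arena, name) catch |err| switch (err) {
            error.EnvironmentVariableNotFound => return null,
            else => |e| return e,
        };
    }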
diff --git a/src/mingw.zig b/src/mingw.zig
index e99a1af8fc..b50cc4b009 100644
--- a/src/mingw.zig
+++ b/src/mingw.zig
@@ -93,12 +93,6 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
"-D_WIN32_WINNT=0x0f00",
"-D__MSVCRT_VERSION__=0x700",
});
- if (std.mem.eql(u8, dep, "tlssup.c") and comp.bin_file.options.lto) {
- // LLD will incorrectly drop the `_tls_index` symbol. Here we work
- // around it by not using LTO for this one file.
- // https://github.com/ziglang/zig/issues/8531
- try args.append("-fno-lto");
- }
c_source_files[i] = .{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
"libc", "mingw", "crt", dep,
diff --git a/src/musl.zig b/src/musl.zig
index 68b524b415..12ff530f8e 100644
--- a/src/musl.zig
+++ b/src/musl.zig
@@ -215,6 +215,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
.optimize_mode = comp.compilerRtOptMode(),
.want_sanitize_c = false,
.want_stack_check = false,
+ .want_stack_protector = 0,
.want_red_zone = comp.bin_file.options.red_zone,
.omit_frame_pointer = comp.bin_file.options.omit_frame_pointer,
.want_valgrind = false,
diff --git a/src/print_air.zig b/src/print_air.zig
index ec4a94b420..04dec25f5f 100644
--- a/src/print_air.zig
+++ b/src/print_air.zig
@@ -170,6 +170,7 @@ const Writer = struct {
.bool_to_int,
.ret,
.ret_load,
+ .is_named_enum_value,
.tag_name,
.error_name,
.sqrt,
@@ -242,6 +243,7 @@ const Writer = struct {
.popcount,
.byte_swap,
.bit_reverse,
+ .error_set_has_value,
=> try w.writeTyOp(s, inst),
.block,
diff --git a/src/print_zir.zig b/src/print_zir.zig
index 6e33154bbd..f315d7f014 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -214,7 +214,6 @@ const Writer = struct {
.trunc,
.round,
.tag_name,
- .reify,
.type_name,
.frame_type,
.frame_size,
@@ -247,7 +246,6 @@ const Writer = struct {
.validate_array_init_ty => try self.writeValidateArrayInitTy(stream, inst),
.array_type_sentinel => try self.writeArrayTypeSentinel(stream, inst),
- .param_type => try self.writeParamType(stream, inst),
.ptr_type => try self.writePtrType(stream, inst),
.int => try self.writeInt(stream, inst),
.int_big => try self.writeIntBig(stream, inst),
@@ -500,6 +498,7 @@ const Writer = struct {
.wasm_memory_size,
.error_to_int,
.int_to_error,
+ .reify,
=> {
const inst_data = self.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(inst_data.node);
@@ -605,16 +604,6 @@ const Writer = struct {
try self.writeSrc(stream, inst_data.src());
}
- fn writeParamType(
- self: *Writer,
- stream: anytype,
- inst: Zir.Inst.Index,
- ) (@TypeOf(stream).Error || error{OutOfMemory})!void {
- const inst_data = self.code.instructions.items(.data)[inst].param_type;
- try self.writeInstRef(stream, inst_data.callee);
- try stream.print(", {d})", .{inst_data.param_index});
- }
-
fn writePtrType(
self: *Writer,
stream: anytype,
@@ -1158,7 +1147,8 @@ const Writer = struct {
fn writeCall(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[inst].pl_node;
const extra = self.code.extraData(Zir.Inst.Call, inst_data.payload_index);
- const args = self.code.refSlice(extra.end, extra.data.flags.args_len);
+ const args_len = extra.data.flags.args_len;
+ const body = self.code.extra[extra.end..];
if (extra.data.flags.ensure_result_used) {
try stream.writeAll("nodiscard ");
@@ -1166,10 +1156,27 @@ const Writer = struct {
try stream.print(".{s}, ", .{@tagName(@intToEnum(std.builtin.CallOptions.Modifier, extra.data.flags.packed_modifier))});
try self.writeInstRef(stream, extra.data.callee);
try stream.writeAll(", [");
- for (args) |arg, i| {
- if (i != 0) try stream.writeAll(", ");
- try self.writeInstRef(stream, arg);
+
+ self.indent += 2;
+ if (args_len != 0) {
+ try stream.writeAll("\n");
}
+ var i: usize = 0;
+ var arg_start: u32 = args_len;
+ while (i < args_len) : (i += 1) {
+ try stream.writeByteNTimes(' ', self.indent);
+ const arg_end = self.code.extra[extra.end + i];
+ defer arg_start = arg_end;
+ const arg_body = body[arg_start..arg_end];
+ try self.writeBracedBody(stream, arg_body);
+
+ try stream.writeAll(",\n");
+ }
+ self.indent -= 2;
+ if (args_len != 0) {
+ try stream.writeByteNTimes(' ', self.indent);
+ }
+
try stream.writeAll("]) ");
try self.writeSrc(stream, inst_data.src());
}
@@ -1238,13 +1245,36 @@ const Writer = struct {
try self.writeFlag(stream, "known_non_opv, ", small.known_non_opv);
try self.writeFlag(stream, "known_comptime_only, ", small.known_comptime_only);
- try stream.print("{s}, {s}, ", .{
- @tagName(small.name_strategy), @tagName(small.layout),
- });
+
+ try stream.print("{s}, ", .{@tagName(small.name_strategy)});
+
+ if (small.layout == .Packed and small.has_backing_int) {
+ const backing_int_body_len = self.code.extra[extra_index];
+ extra_index += 1;
+ try stream.writeAll("Packed(");
+ if (backing_int_body_len == 0) {
+ const backing_int_ref = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
+ extra_index += 1;
+ try self.writeInstRef(stream, backing_int_ref);
+ } else {
+ const body = self.code.extra[extra_index..][0..backing_int_body_len];
+ extra_index += backing_int_body_len;
+ self.indent += 2;
+ try self.writeBracedDecl(stream, body);
+ self.indent -= 2;
+ }
+ try stream.writeAll("), ");
+ } else {
+ try stream.print("{s}, ", .{@tagName(small.layout)});
+ }
if (decls_len == 0) {
try stream.writeAll("{}, ");
} else {
+ const prev_parent_decl_node = self.parent_decl_node;
+ if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off);
+ defer self.parent_decl_node = prev_parent_decl_node;
+
try stream.writeAll("{\n");
self.indent += 2;
extra_index = try self.writeDecls(stream, decls_len, extra_index);
@@ -1413,23 +1443,32 @@ const Writer = struct {
try self.writeFlag(stream, "autoenum, ", small.auto_enum_tag);
if (decls_len == 0) {
- try stream.writeAll("{}, ");
+ try stream.writeAll("{}");
} else {
+ const prev_parent_decl_node = self.parent_decl_node;
+ if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off);
+ defer self.parent_decl_node = prev_parent_decl_node;
+
try stream.writeAll("{\n");
self.indent += 2;
extra_index = try self.writeDecls(stream, decls_len, extra_index);
self.indent -= 2;
try stream.writeByteNTimes(' ', self.indent);
- try stream.writeAll("}, ");
+ try stream.writeAll("}");
}
- assert(fields_len != 0);
-
if (tag_type_ref != .none) {
- try self.writeInstRef(stream, tag_type_ref);
try stream.writeAll(", ");
+ try self.writeInstRef(stream, tag_type_ref);
}
+ if (fields_len == 0) {
+ try stream.writeAll("})");
+ try self.writeSrcNode(stream, src_node);
+ return;
+ }
+ try stream.writeAll(", ");
+
const body = self.code.extra[extra_index..][0..body_len];
extra_index += body.len;
@@ -1662,6 +1701,10 @@ const Writer = struct {
if (decls_len == 0) {
try stream.writeAll("{}, ");
} else {
+ const prev_parent_decl_node = self.parent_decl_node;
+ if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off);
+ defer self.parent_decl_node = prev_parent_decl_node;
+
try stream.writeAll("{\n");
self.indent += 2;
extra_index = try self.writeDecls(stream, decls_len, extra_index);
@@ -1678,13 +1721,13 @@ const Writer = struct {
const body = self.code.extra[extra_index..][0..body_len];
extra_index += body.len;
+ const prev_parent_decl_node = self.parent_decl_node;
+ if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off);
+ try self.writeBracedDecl(stream, body);
if (fields_len == 0) {
- assert(body.len == 0);
- try stream.writeAll("{}, {})");
+ try stream.writeAll(", {})");
+ self.parent_decl_node = prev_parent_decl_node;
} else {
- const prev_parent_decl_node = self.parent_decl_node;
- if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off);
- try self.writeBracedDecl(stream, body);
try stream.writeAll(", {\n");
self.indent += 2;
@@ -1755,6 +1798,10 @@ const Writer = struct {
if (decls_len == 0) {
try stream.writeAll("{})");
} else {
+ const prev_parent_decl_node = self.parent_decl_node;
+ if (src_node) |off| self.parent_decl_node = self.relativeToNodeIndex(off);
+ defer self.parent_decl_node = prev_parent_decl_node;
+
try stream.writeAll("{\n");
self.indent += 2;
_ = try self.writeDecls(stream, decls_len, extra_index);
diff --git a/src/stage1.zig b/src/stage1.zig
index f400053b0f..e3f0daaa44 100644
--- a/src/stage1.zig
+++ b/src/stage1.zig
@@ -18,7 +18,7 @@ const target_util = @import("target.zig");
comptime {
assert(builtin.link_libc);
- assert(build_options.is_stage1);
+ assert(build_options.have_stage1);
assert(build_options.have_llvm);
if (!builtin.is_test) {
@export(main, .{ .name = "main" });
@@ -416,7 +416,7 @@ export fn stage2_add_link_lib(
const target = comp.getTarget();
const is_libc = target_util.is_libc_lib_name(target, lib_name);
if (is_libc) {
- if (!comp.bin_file.options.link_libc) {
+ if (!comp.bin_file.options.link_libc and !comp.bin_file.options.parent_compilation_link_libc) {
return "dependency on libc must be explicitly specified in the build command";
}
return null;
diff --git a/src/stage1/all_types.hpp b/src/stage1/all_types.hpp
index a17ddbdcc2..5f216fe388 100644
--- a/src/stage1/all_types.hpp
+++ b/src/stage1/all_types.hpp
@@ -1116,6 +1116,7 @@ struct AstNodeContainerDecl {
ContainerLayout layout;
bool auto_enum, is_root; // union(enum)
+ bool unsupported_explicit_backing_int;
};
struct AstNodeErrorSetField {
diff --git a/src/stage1/analyze.cpp b/src/stage1/analyze.cpp
index e814f87d68..f0cad841be 100644
--- a/src/stage1/analyze.cpp
+++ b/src/stage1/analyze.cpp
@@ -3034,6 +3034,12 @@ static Error resolve_struct_zero_bits(CodeGen *g, ZigType *struct_type) {
AstNode *decl_node = struct_type->data.structure.decl_node;
+ if (decl_node->data.container_decl.unsupported_explicit_backing_int) {
+ add_node_error(g, decl_node, buf_create_from_str(
+ "the stage1 compiler does not support explicit backing integer types on packed structs"));
+ return ErrorSemanticAnalyzeFail;
+ }
+
if (struct_type->data.structure.resolve_loop_flag_zero_bits) {
if (struct_type->data.structure.resolve_status != ResolveStatusInvalid) {
struct_type->data.structure.resolve_status = ResolveStatusInvalid;
diff --git a/src/stage1/astgen.cpp b/src/stage1/astgen.cpp
index 367bed69cf..54d9c969a5 100644
--- a/src/stage1/astgen.cpp
+++ b/src/stage1/astgen.cpp
@@ -5374,10 +5374,8 @@ static Stage1ZirInst *astgen_builtin_fn_call(Stage1AstGen *ag, Scope *scope, Ast
if (arg0_value == ag->codegen->invalid_inst_src)
return arg0_value;
- AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
- Stage1ZirInst *arg1_value = astgen_node(ag, arg1_node, scope);
- if (arg1_value == ag->codegen->invalid_inst_src)
- return arg1_value;
+ Stage1ZirInst *arg1_value = arg0_value;
+ arg0_value = ir_build_typeof_1(ag, scope, arg0_node, arg1_value);
Stage1ZirInst *result;
switch (builtin_fn->id) {
diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp
index 93a5bae2d1..55aa73a3b7 100644
--- a/src/stage1/codegen.cpp
+++ b/src/stage1/codegen.cpp
@@ -9977,11 +9977,11 @@ static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdCInclude, "cInclude", 1);
create_builtin_fn(g, BuiltinFnIdCDefine, "cDefine", 2);
create_builtin_fn(g, BuiltinFnIdCUndef, "cUndef", 1);
- create_builtin_fn(g, BuiltinFnIdCtz, "ctz", 2);
- create_builtin_fn(g, BuiltinFnIdClz, "clz", 2);
- create_builtin_fn(g, BuiltinFnIdPopCount, "popCount", 2);
- create_builtin_fn(g, BuiltinFnIdBswap, "byteSwap", 2);
- create_builtin_fn(g, BuiltinFnIdBitReverse, "bitReverse", 2);
+ create_builtin_fn(g, BuiltinFnIdCtz, "ctz", 1);
+ create_builtin_fn(g, BuiltinFnIdClz, "clz", 1);
+ create_builtin_fn(g, BuiltinFnIdPopCount, "popCount", 1);
+ create_builtin_fn(g, BuiltinFnIdBswap, "byteSwap", 1);
+ create_builtin_fn(g, BuiltinFnIdBitReverse, "bitReverse", 1);
create_builtin_fn(g, BuiltinFnIdImport, "import", 1);
create_builtin_fn(g, BuiltinFnIdCImport, "cImport", 1);
create_builtin_fn(g, BuiltinFnIdErrName, "errorName", 1);
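The arity change above tracks the language change that made these builtins infer the operand type instead of taking it as a first argument; a sketch of the new call forms:

    const std = @import("std");

    test "single-argument bit builtins" {
        const x: u32 = 0b1000;
        try std.testing.expect(@ctz(x) == 3);
        try std.testing.expect(@clz(x) == 28);
        try std.testing.expect(@popCount(x) == 1);
        try std.testing.expect(@byteSwap(@as(u32, 0x11223344)) == 0x44332211);
        try std.testing.expect(@bitReverse(@as(u8, 0b1000_0000)) == 0b0000_0001);
    }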
@@ -10261,13 +10261,13 @@ Buf *codegen_generate_builtin_source(CodeGen *g) {
buf_appendf(contents, "pub const single_threaded = %s;\n", bool_to_str(g->is_single_threaded));
buf_appendf(contents, "pub const abi = std.Target.Abi.%s;\n", cur_abi);
buf_appendf(contents, "pub const cpu = std.Target.Cpu.baseline(.%s);\n", cur_arch);
- buf_appendf(contents, "pub const stage2_arch: std.Target.Cpu.Arch = .%s;\n", cur_arch);
buf_appendf(contents, "pub const os = std.Target.Os.Tag.defaultVersionRange(.%s, .%s);\n", cur_os, cur_arch);
buf_appendf(contents,
"pub const target = std.Target{\n"
" .cpu = cpu,\n"
" .os = os,\n"
" .abi = abi,\n"
+ " .ofmt = object_format,\n"
"};\n"
);
diff --git a/src/stage1/ir.cpp b/src/stage1/ir.cpp
index e31715030c..a5428945a9 100644
--- a/src/stage1/ir.cpp
+++ b/src/stage1/ir.cpp
@@ -18640,7 +18640,7 @@ static Error ir_make_type_info_value(IrAnalyze *ira, Scope *scope, AstNode *sour
result->special = ConstValSpecialStatic;
result->type = ir_type_info_get_type(ira, "Struct", nullptr);
- ZigValue **fields = alloc_const_vals_ptrs(g, 4);
+ ZigValue **fields = alloc_const_vals_ptrs(g, 5);
result->data.x_struct.fields = fields;
// layout: ContainerLayout
@@ -18648,8 +18648,17 @@ static Error ir_make_type_info_value(IrAnalyze *ira, Scope *scope, AstNode *sour
fields[0]->special = ConstValSpecialStatic;
fields[0]->type = ir_type_info_get_type(ira, "ContainerLayout", nullptr);
bigint_init_unsigned(&fields[0]->data.x_enum_tag, type_entry->data.structure.layout);
+
+ // backing_integer: ?type
+ ensure_field_index(result->type, "backing_integer", 1);
+ fields[1]->special = ConstValSpecialStatic;
+ fields[1]->type = get_optional_type(g, g->builtin_types.entry_type);
+ // This is always null in stage1, as stage1 does not support explicit backing integers
+ // for packed structs.
+ fields[1]->data.x_optional = nullptr;
+
// fields: []Type.StructField
- ensure_field_index(result->type, "fields", 1);
+ ensure_field_index(result->type, "fields", 2);
ZigType *type_info_struct_field_type = ir_type_info_get_type(ira, "StructField", nullptr);
if ((err = type_resolve(g, type_info_struct_field_type, ResolveStatusSizeKnown))) {
@@ -18663,7 +18672,7 @@ static Error ir_make_type_info_value(IrAnalyze *ira, Scope *scope, AstNode *sour
struct_field_array->data.x_array.special = ConstArraySpecialNone;
struct_field_array->data.x_array.data.s_none.elements = g->pass1_arena->allocate(struct_field_count);
- init_const_slice(g, fields[1], struct_field_array, 0, struct_field_count, false, nullptr);
+ init_const_slice(g, fields[2], struct_field_array, 0, struct_field_count, false, nullptr);
for (uint32_t struct_field_index = 0; struct_field_index < struct_field_count; struct_field_index++) {
TypeStructField *struct_field = type_entry->data.structure.fields[struct_field_index];
@@ -18710,18 +18719,18 @@ static Error ir_make_type_info_value(IrAnalyze *ira, Scope *scope, AstNode *sour
struct_field_val->parent.data.p_array.elem_index = struct_field_index;
}
// decls: []Type.Declaration
- ensure_field_index(result->type, "decls", 2);
- if ((err = ir_make_type_info_decls(ira, source_node, fields[2],
+ ensure_field_index(result->type, "decls", 3);
+ if ((err = ir_make_type_info_decls(ira, source_node, fields[3],
type_entry->data.structure.decls_scope, false)))
{
return err;
}
// is_tuple: bool
- ensure_field_index(result->type, "is_tuple", 3);
- fields[3]->special = ConstValSpecialStatic;
- fields[3]->type = g->builtin_types.entry_bool;
- fields[3]->data.x_bool = is_tuple(type_entry);
+ ensure_field_index(result->type, "is_tuple", 4);
+ fields[4]->special = ConstValSpecialStatic;
+ fields[4]->type = g->builtin_types.entry_bool;
+ fields[4]->data.x_bool = is_tuple(type_entry);
break;
}
@@ -19313,7 +19322,14 @@ static ZigType *type_info_to_type(IrAnalyze *ira, Scope *scope, AstNode *source_
assert(layout_value->type == ir_type_info_get_type(ira, "ContainerLayout", nullptr));
ContainerLayout layout = (ContainerLayout)bigint_as_u32(&layout_value->data.x_enum_tag);
- ZigValue *fields_value = get_const_field(ira, source_node, payload, "fields", 1);
+ ZigType *tag_type = get_const_field_meta_type_optional(ira, source_node, payload, "backing_integer", 1);
+ if (tag_type != nullptr) {
+ ir_add_error_node(ira, source_node, buf_create_from_str(
+ "the stage1 compiler does not support explicit backing integer types on packed structs"));
+ return ira->codegen->invalid_inst_gen->value->type;
+ }
+
+ ZigValue *fields_value = get_const_field(ira, source_node, payload, "fields", 2);
if (fields_value == nullptr)
return ira->codegen->invalid_inst_gen->value->type;
assert(fields_value->special == ConstValSpecialStatic);
@@ -19322,7 +19338,7 @@ static ZigType *type_info_to_type(IrAnalyze *ira, Scope *scope, AstNode *source_
ZigValue *fields_len_value = fields_value->data.x_struct.fields[slice_len_index];
size_t fields_len = bigint_as_usize(&fields_len_value->data.x_bigint);
- ZigValue *decls_value = get_const_field(ira, source_node, payload, "decls", 2);
+ ZigValue *decls_value = get_const_field(ira, source_node, payload, "decls", 3);
if (decls_value == nullptr)
return ira->codegen->invalid_inst_gen->value->type;
assert(decls_value->special == ConstValSpecialStatic);
@@ -19335,7 +19351,7 @@ static ZigType *type_info_to_type(IrAnalyze *ira, Scope *scope, AstNode *source_
}
bool is_tuple;
- if ((err = get_const_field_bool(ira, source_node, payload, "is_tuple", 3, &is_tuple)))
+ if ((err = get_const_field_bool(ira, source_node, payload, "is_tuple", 4, &is_tuple)))
return ira->codegen->invalid_inst_gen->value->type;
ZigType *entry = new_type_table_entry(ZigTypeIdStruct);
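The index shuffling above corresponds to backing_integer taking slot 1 of std.builtin.Type.Struct; a userland sketch (stage1 always reports null here):

    const std = @import("std");

    test "Struct type info carries backing_integer" {
        const S = extern struct { a: u8, b: u8 };
        const info = @typeInfo(S).Struct;
        try std.testing.expect(info.backing_integer == null);
        try std.testing.expect(info.fields.len == 2);
        try std.testing.expect(!info.is_tuple);
    }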
diff --git a/src/stage1/parser.cpp b/src/stage1/parser.cpp
index fdc0777aff..bd778484cb 100644
--- a/src/stage1/parser.cpp
+++ b/src/stage1/parser.cpp
@@ -2902,16 +2902,25 @@ static AstNode *ast_parse_container_decl_auto(ParseContext *pc) {
}
// ContainerDeclType
-// <- KEYWORD_struct
+// <- KEYWORD_struct (LPAREN Expr RPAREN)?
// / KEYWORD_enum (LPAREN Expr RPAREN)?
// / KEYWORD_union (LPAREN (KEYWORD_enum (LPAREN Expr RPAREN)? / Expr) RPAREN)?
// / KEYWORD_opaque
static AstNode *ast_parse_container_decl_type(ParseContext *pc) {
TokenIndex first = eat_token_if(pc, TokenIdKeywordStruct);
if (first != 0) {
+ bool explicit_backing_int = false;
+ if (eat_token_if(pc, TokenIdLParen) != 0) {
+ explicit_backing_int = true;
+ ast_expect(pc, ast_parse_expr);
+ expect_token(pc, TokenIdRParen);
+ }
AstNode *res = ast_create_node(pc, NodeTypeContainerDecl, first);
res->data.container_decl.init_arg_expr = nullptr;
res->data.container_decl.kind = ContainerKindStruct;
+ // We want this to be an error in semantic analysis, not parsing, to make
+ // sharing the test suite between stage1 and self-hosted easier.
+ res->data.container_decl.unsupported_explicit_backing_int = explicit_backing_int;
return res;
}
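For reference, this is the syntax the new grammar rule accepts and stage1 then rejects during semantic analysis; the sketch below compiles only under the self-hosted implementation:

    // A packed struct with an explicit u16 backing integer; the field
    // widths must sum to exactly 16 bits.
    const Header = packed struct(u16) {
        version: u4,
        flags: u4,
        length: u8,
    };

    comptime {
        if (@bitSizeOf(Header) != 16) @compileError("unexpected size");
    }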
diff --git a/src/target.zig b/src/target.zig
index 730c82a602..55238a6e86 100644
--- a/src/target.zig
+++ b/src/target.zig
@@ -321,6 +321,15 @@ pub fn supportsStackProbing(target: std.Target) bool {
(target.cpu.arch == .i386 or target.cpu.arch == .x86_64);
}
+pub fn supportsStackProtector(target: std.Target) bool {
+ // TODO: investigate whether stack-protector works on wasm
+ return !target.isWasm();
+}
+
+pub fn libcProvidesStackProtector(target: std.Target) bool {
+ return !target.isMinGW() and target.os.tag != .wasi;
+}
+
pub fn supportsReturnAddress(target: std.Target) bool {
return switch (target.cpu.arch) {
.wasm32, .wasm64 => target.os.tag == .emscripten,
diff --git a/src/test.zig b/src/test.zig
index 5f4107a402..babded13f9 100644
--- a/src/test.zig
+++ b/src/test.zig
@@ -25,7 +25,7 @@ const skip_stage1 = builtin.zig_backend != .stage1 or build_options.skip_stage1;
const hr = "=" ** 80;
test {
- if (build_options.is_stage1) {
+ if (build_options.have_stage1) {
@import("stage1.zig").os_init();
}
@@ -606,7 +606,6 @@ pub const TestContext = struct {
output_mode: std.builtin.OutputMode,
optimize_mode: std.builtin.Mode = .Debug,
updates: std.ArrayList(Update),
- object_format: ?std.Target.ObjectFormat = null,
emit_h: bool = false,
is_test: bool = false,
expect_exact: bool = false,
@@ -782,12 +781,13 @@ pub const TestContext = struct {
pub fn exeFromCompiledC(ctx: *TestContext, name: []const u8, target: CrossTarget) *Case {
const prefixed_name = std.fmt.allocPrint(ctx.arena, "CBE: {s}", .{name}) catch
@panic("out of memory");
+ var target_adjusted = target;
+ target_adjusted.ofmt = std.Target.ObjectFormat.c;
ctx.cases.append(Case{
.name = prefixed_name,
- .target = target,
+ .target = target_adjusted,
.updates = std.ArrayList(Update).init(ctx.cases.allocator),
.output_mode = .Exe,
- .object_format = .c,
.files = std.ArrayList(File).init(ctx.arena),
}) catch @panic("out of memory");
return &ctx.cases.items[ctx.cases.items.len - 1];
@@ -851,12 +851,13 @@ pub const TestContext = struct {
/// Adds a test case for Zig or ZIR input, producing C code.
pub fn addC(ctx: *TestContext, name: []const u8, target: CrossTarget) *Case {
+ var target_adjusted = target;
+ target_adjusted.ofmt = std.Target.ObjectFormat.c;
ctx.cases.append(Case{
.name = name,
- .target = target,
+ .target = target_adjusted,
.updates = std.ArrayList(Update).init(ctx.cases.allocator),
.output_mode = .Obj,
- .object_format = .c,
.files = std.ArrayList(File).init(ctx.arena),
}) catch @panic("out of memory");
return &ctx.cases.items[ctx.cases.items.len - 1];
@@ -1224,10 +1225,6 @@ pub const TestContext = struct {
try aux_thread_pool.init(self.gpa);
defer aux_thread_pool.deinit();
- var case_thread_pool: ThreadPool = undefined;
- try case_thread_pool.init(self.gpa);
- defer case_thread_pool.deinit();
-
// Use the same global cache dir for all the tests, such that we for example don't have to
// rebuild musl libc for every case (when LLVM backend is enabled).
var global_tmp = std.testing.tmpDir(.{});
@@ -1245,9 +1242,6 @@ pub const TestContext = struct {
defer self.gpa.free(global_cache_directory.path.?);
{
- var wait_group: WaitGroup = .{};
- defer wait_group.wait();
-
for (self.cases.items) |*case| {
if (build_options.skip_non_native) {
if (case.target.getCpuArch() != builtin.cpu.arch)
@@ -1267,17 +1261,19 @@ pub const TestContext = struct {
if (std.mem.indexOf(u8, case.name, test_filter) == null) continue;
}
- wait_group.start();
- try case_thread_pool.spawn(workerRunOneCase, .{
+ var prg_node = root_node.start(case.name, case.updates.items.len);
+ prg_node.activate();
+ defer prg_node.end();
+
+ case.result = runOneCase(
self.gpa,
- root_node,
- case,
+ &prg_node,
+ case.*,
zig_lib_directory,
&aux_thread_pool,
global_cache_directory,
host,
- &wait_group,
- });
+ );
}
}
@@ -1295,33 +1291,6 @@ pub const TestContext = struct {
}
}
- fn workerRunOneCase(
- gpa: Allocator,
- root_node: *std.Progress.Node,
- case: *Case,
- zig_lib_directory: Compilation.Directory,
- thread_pool: *ThreadPool,
- global_cache_directory: Compilation.Directory,
- host: std.zig.system.NativeTargetInfo,
- wait_group: *WaitGroup,
- ) void {
- defer wait_group.finish();
-
- var prg_node = root_node.start(case.name, case.updates.items.len);
- prg_node.activate();
- defer prg_node.end();
-
- case.result = runOneCase(
- gpa,
- &prg_node,
- case.*,
- zig_lib_directory,
- thread_pool,
- global_cache_directory,
- host,
- );
- }
-
fn runOneCase(
allocator: Allocator,
root_node: *std.Progress.Node,
@@ -1533,7 +1502,6 @@ pub const TestContext = struct {
.root_name = "test_case",
.target = target,
.output_mode = case.output_mode,
- .object_format = case.object_format,
});
const emit_directory: Compilation.Directory = .{
@@ -1569,7 +1537,6 @@ pub const TestContext = struct {
.emit_h = emit_h,
.main_pkg = &main_pkg,
.keep_source_files_loaded = true,
- .object_format = case.object_format,
.is_native_os = case.target.isNativeOs(),
.is_native_abi = case.target.isNativeAbi(),
.dynamic_linker = target_info.dynamic_linker.get(),
@@ -1814,7 +1781,7 @@ pub const TestContext = struct {
".." ++ ss ++ "{s}" ++ ss ++ "{s}",
.{ &tmp.sub_path, bin_name },
);
- if (case.object_format != null and case.object_format.? == .c) {
+ if (case.target.ofmt != null and case.target.ofmt.? == .c) {
if (host.getExternalExecutor(target_info, .{ .link_libc = true }) != .native) {
// We wouldn't be able to run the compiled C code.
continue :update; // Pass test.
diff --git a/src/translate_c.zig b/src/translate_c.zig
index 97e47d84f3..b0fae81475 100644
--- a/src/translate_c.zig
+++ b/src/translate_c.zig
@@ -439,6 +439,24 @@ pub fn translate(
return ast.render(gpa, context.global_scope.nodes.items);
}
+/// Determines whether a macro is of the form `#define FOO FOO` (possibly with trailing tokens).
+/// Macros of this form will not be translated.
+fn isSelfDefinedMacro(unit: *const clang.ASTUnit, c: *const Context, macro: *const clang.MacroDefinitionRecord) bool {
+ const source = getMacroText(unit, c, macro);
+ var tokenizer = std.c.Tokenizer{
+ .buffer = source,
+ };
+ const name_tok = tokenizer.next();
+ const name = source[name_tok.start..name_tok.end];
+
+ const first_tok = tokenizer.next();
+ // We do not just check for `.Identifier` below because the tokenizer matches
+ // keyword tokens before identifiers; otherwise we would miss cases like
+ // `#define inline inline` (`inline` is a valid C89 identifier).
+ if (first_tok.id == .Eof) return false;
+ return mem.eql(u8, name, source[first_tok.start..first_tok.end]);
+}
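A hedged, standalone restatement of the check, runnable against the same `std.c.Tokenizer` API used above (the test inputs are hypothetical macro bodies):

    const std = @import("std");

    fn isSelfDefined(source: []const u8) bool {
        var tokenizer = std.c.Tokenizer{ .buffer = source };
        const name_tok = tokenizer.next();
        const name = source[name_tok.start..name_tok.end];
        const first_tok = tokenizer.next();
        if (first_tok.id == .Eof) return false;
        return std.mem.eql(u8, name, source[first_tok.start..first_tok.end]);
    }

    test "self-defined macro detection" {
        // `#define stdin stdin` from stdio.h is the motivating case.
        try std.testing.expect(isSelfDefined("stdin stdin"));
        // Trailing tokens still count as self-defined.
        try std.testing.expect(isSelfDefined("FOO FOO 1"));
        // The first body token is `(`, not the name, so this is translated.
        try std.testing.expect(!isSelfDefined("BAR (BAR)"));
    }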
+
fn prepopulateGlobalNameTable(ast_unit: *clang.ASTUnit, c: *Context) !void {
if (!ast_unit.visitLocalTopLevelDecls(c, declVisitorNamesOnlyC)) {
return error.OutOfMemory;
@@ -455,7 +473,10 @@ fn prepopulateGlobalNameTable(ast_unit: *clang.ASTUnit, c: *Context) !void {
const macro = @ptrCast(*clang.MacroDefinitionRecord, entity);
const raw_name = macro.getName_getNameStart();
const name = try c.str(raw_name);
- try c.global_names.put(c.gpa, name, {});
+
+ if (!isSelfDefinedMacro(ast_unit, c, macro)) {
+ try c.global_names.put(c.gpa, name, {});
+ }
},
else => {},
}
@@ -4001,8 +4022,7 @@ fn transCPtrCast(
// For opaque types a ptrCast is enough
expr
else blk: {
- const child_type_node = try transQualType(c, scope, child_type, loc);
- const alignof = try Tag.std_meta_alignment.create(c.arena, child_type_node);
+ const alignof = try Tag.std_meta_alignment.create(c.arena, dst_type_node);
const align_cast = try Tag.align_cast.create(c.arena, .{ .lhs = alignof, .rhs = expr });
break :blk align_cast;
};
@@ -5447,6 +5467,16 @@ fn tokenizeMacro(source: []const u8, tok_list: *std.ArrayList(CToken)) Error!voi
}
}
+fn getMacroText(unit: *const clang.ASTUnit, c: *const Context, macro: *const clang.MacroDefinitionRecord) []const u8 {
+ const begin_loc = macro.getSourceRange_getBegin();
+ const end_loc = clang.Lexer.getLocForEndOfToken(macro.getSourceRange_getEnd(), c.source_manager, unit);
+
+ const begin_c = c.source_manager.getCharacterData(begin_loc);
+ const end_c = c.source_manager.getCharacterData(end_loc);
+ const slice_len = @ptrToInt(end_c) - @ptrToInt(begin_c);
+ return begin_c[0..slice_len];
+}
+
fn transPreprocessorEntities(c: *Context, unit: *clang.ASTUnit) Error!void {
// TODO if we see #undef, delete it from the table
var it = unit.getLocalPreprocessingEntities_begin();
@@ -5463,22 +5493,18 @@ fn transPreprocessorEntities(c: *Context, unit: *clang.ASTUnit) Error!void {
const macro = @ptrCast(*clang.MacroDefinitionRecord, entity);
const raw_name = macro.getName_getNameStart();
const begin_loc = macro.getSourceRange_getBegin();
- const end_loc = clang.Lexer.getLocForEndOfToken(macro.getSourceRange_getEnd(), c.source_manager, unit);
const name = try c.str(raw_name);
if (scope.containsNow(name)) {
continue;
}
- const begin_c = c.source_manager.getCharacterData(begin_loc);
- const end_c = c.source_manager.getCharacterData(end_loc);
- const slice_len = @ptrToInt(end_c) - @ptrToInt(begin_c);
- const slice = begin_c[0..slice_len];
+ const source = getMacroText(unit, c, macro);
- try tokenizeMacro(slice, &tok_list);
+ try tokenizeMacro(source, &tok_list);
var macro_ctx = MacroCtx{
- .source = slice,
+ .source = source,
.list = tok_list.items,
.name = name,
.loc = begin_loc,
@@ -5491,7 +5517,8 @@ fn transPreprocessorEntities(c: *Context, unit: *clang.ASTUnit) Error!void {
// if it equals itself, ignore. for example, from stdio.h:
// #define stdin stdin
const tok = macro_ctx.list[1];
- if (mem.eql(u8, name, slice[tok.start..tok.end])) {
+ if (mem.eql(u8, name, source[tok.start..tok.end])) {
+ assert(!c.global_names.contains(source[tok.start..tok.end]));
continue;
}
},
@@ -5648,7 +5675,7 @@ fn parseCNumLit(c: *Context, m: *MacroCtx) ParseError!Node {
switch (m.list[m.i].id) {
.IntegerLiteral => |suffix| {
var radix: []const u8 = "decimal";
- if (lit_bytes.len > 2 and lit_bytes[0] == '0') {
+ if (lit_bytes.len >= 2 and lit_bytes[0] == '0') {
switch (lit_bytes[1]) {
'0'...'7' => {
// Octal
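The boundary change matters for two-character literals: with `>`, a literal such as `07` never reached the radix switch and was reported as decimal; with `>=` it is classified as octal. A small illustration of the fixed predicate (assumed inputs):

    const std = @import("std");

    test "two-character literals reach radix detection" {
        const lit = "07";
        // Previously `lit.len > 2` excluded this literal; now it is detected
        // as octal because it starts with '0' followed by an octal digit.
        try std.testing.expect(lit.len >= 2 and lit[0] == '0');
        try std.testing.expect(lit[1] >= '0' and lit[1] <= '7');
    }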
diff --git a/src/type.zig b/src/type.zig
index a585c830fb..d516015d39 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -2310,6 +2310,8 @@ pub const Type = extern union {
/// fields will count towards the ABI size. For example, `struct {T: type, x: i32}`
/// hasRuntimeBits()=true and abiSize()=4
/// * the type has only one possible value, making its ABI size 0.
+ /// - an enum with an explicit tag type has the ABI size of the integer tag
+ ///   type, making it a one-possible-value type only if the tag type has 0 bits.
/// When `ignore_comptime_only` is true, then types that are comptime only
/// may return false positives.
pub fn hasRuntimeBitsAdvanced(
@@ -2376,6 +2378,32 @@ pub const Type = extern union {
.error_set_merged,
=> return true,
+ // Pointers to zero-bit types still have a runtime address; however, pointers
+ // to comptime-only types do not, with the exception of function pointers.
+ .anyframe_T,
+ .optional_single_mut_pointer,
+ .optional_single_const_pointer,
+ .single_const_pointer,
+ .single_mut_pointer,
+ .many_const_pointer,
+ .many_mut_pointer,
+ .c_const_pointer,
+ .c_mut_pointer,
+ .const_slice,
+ .mut_slice,
+ .pointer,
+ => {
+ if (ignore_comptime_only) {
+ return true;
+ } else if (ty.childType().zigTypeTag() == .Fn) {
+ return !ty.childType().fnInfo().is_generic;
+ } else if (sema_kit) |sk| {
+ return !(try sk.sema.typeRequiresComptime(sk.block, sk.src, ty));
+ } else {
+ return !comptimeOnly(ty);
+ }
+ },
+
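A hedged illustration of the three cases distinguished above (the size identities assume the usual 64-bit conventions and the post-change semantics):

    const std = @import("std");

    comptime {
        // Pointer to a zero-bit type: still a runtime address.
        std.debug.assert(@sizeOf(*void) == @sizeOf(usize));
        // A non-generic function pointer also has runtime bits.
        std.debug.assert(@sizeOf(*const fn () void) == @sizeOf(usize));
        // Pointer to a comptime-only type: no runtime representation.
        std.debug.assert(@sizeOf(*const comptime_int) == 0);
    }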
// These are false because they are comptime-only types.
.single_const_pointer_to_comptime_int,
.void,
@@ -2399,30 +2427,6 @@ pub const Type = extern union {
.fn_ccc_void_no_args,
=> return false,
- // These types have more than one possible value, so the result is the same as
- // asking whether they are comptime-only types.
- .anyframe_T,
- .optional_single_mut_pointer,
- .optional_single_const_pointer,
- .single_const_pointer,
- .single_mut_pointer,
- .many_const_pointer,
- .many_mut_pointer,
- .c_const_pointer,
- .c_mut_pointer,
- .const_slice,
- .mut_slice,
- .pointer,
- => {
- if (ignore_comptime_only) {
- return true;
- } else if (sema_kit) |sk| {
- return !(try sk.sema.typeRequiresComptime(sk.block, sk.src, ty));
- } else {
- return !comptimeOnly(ty);
- }
- },
-
.optional => {
var buf: Payload.ElemType = undefined;
const child_ty = ty.optionalChild(&buf);
@@ -2450,9 +2454,9 @@ pub const Type = extern union {
_ = try sk.sema.resolveTypeFields(sk.block, sk.src, ty);
}
assert(struct_obj.haveFieldTypes());
- for (struct_obj.fields.values()) |value| {
- if (value.is_comptime) continue;
- if (try value.ty.hasRuntimeBitsAdvanced(ignore_comptime_only, sema_kit))
+ for (struct_obj.fields.values()) |field| {
+ if (field.is_comptime) continue;
+ if (try field.ty.hasRuntimeBitsAdvanced(ignore_comptime_only, sema_kit))
return true;
} else {
return false;
@@ -2461,7 +2465,7 @@ pub const Type = extern union {
.enum_full => {
const enum_full = ty.castTag(.enum_full).?.data;
- return enum_full.fields.count() >= 2;
+ return enum_full.tag_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, sema_kit);
},
.enum_simple => {
const enum_simple = ty.castTag(.enum_simple).?.data;
@@ -2491,6 +2495,7 @@ pub const Type = extern union {
if (try union_obj.tag_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, sema_kit)) {
return true;
}
+
if (sema_kit) |sk| {
_ = try sk.sema.resolveTypeFields(sk.block, sk.src, ty);
}
@@ -3000,9 +3005,17 @@ pub const Type = extern union {
.lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) },
};
if (struct_obj.layout == .Packed) {
- var buf: Type.Payload.Bits = undefined;
- const int_ty = struct_obj.packedIntegerType(target, &buf);
- return AbiAlignmentAdvanced{ .scalar = int_ty.abiAlignment(target) };
+ switch (strat) {
+ .sema_kit => |sk| try sk.sema.resolveTypeLayout(sk.block, sk.src, ty),
+ .lazy => |arena| {
+ if (!struct_obj.haveLayout()) {
+ return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) };
+ }
+ },
+ .eager => {},
+ }
+ assert(struct_obj.haveLayout());
+ return AbiAlignmentAdvanced{ .scalar = struct_obj.backing_int_ty.abiAlignment(target) };
}
const fields = ty.structFields();
@@ -3021,6 +3034,15 @@ pub const Type = extern union {
},
};
big_align = @maximum(big_align, field_align);
+
+ // This logic is duplicated in Module.Struct.Field.alignment.
+ if (struct_obj.layout == .Extern or target.ofmt == .c) {
+ if (field.ty.isAbiInt() and field.ty.intInfo(target).bits >= 128) {
+ // The C ABI requires 128-bit integer fields of structs
+ // to be 16-byte aligned.
+ big_align = @maximum(big_align, 16);
+ }
+ }
}
return AbiAlignmentAdvanced{ .scalar = big_align };
},
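Concretely, this is what moves `@alignOf` and `@sizeOf` for structs with 128-bit integer fields, as the behavior-test updates below reflect. An illustrative type with assumed x86_64 values:

    const std = @import("std");

    const B = extern struct {
        x: u64,
        y: u128,
    };

    test "128-bit field alignment under the C ABI" {
        // Assumed x86_64 values, mirroring the updated align.zig expectations.
        try std.testing.expect(@alignOf(B) == 16); // was 8 before this change
        try std.testing.expect(@sizeOf(B) == 32); // was 24
    }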
@@ -3105,6 +3127,13 @@ pub const Type = extern union {
.sema_kit => unreachable, // handled above
.lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) },
};
+ if (union_obj.fields.count() == 0) {
+ if (have_tag) {
+ return abiAlignmentAdvanced(union_obj.tag_ty, target, strat);
+ } else {
+ return AbiAlignmentAdvanced{ .scalar = @boolToInt(union_obj.layout == .Extern) };
+ }
+ }
var max_align: u32 = 0;
if (have_tag) max_align = union_obj.tag_ty.abiAlignment(target);
@@ -3192,17 +3221,16 @@ pub const Type = extern union {
.Packed => {
const struct_obj = ty.castTag(.@"struct").?.data;
switch (strat) {
- .sema_kit => |sk| _ = try sk.sema.resolveTypeFields(sk.block, sk.src, ty),
+ .sema_kit => |sk| try sk.sema.resolveTypeLayout(sk.block, sk.src, ty),
.lazy => |arena| {
- if (!struct_obj.haveFieldTypes()) {
+ if (!struct_obj.haveLayout()) {
return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) };
}
},
.eager => {},
}
- var buf: Type.Payload.Bits = undefined;
- const int_ty = struct_obj.packedIntegerType(target, &buf);
- return AbiSizeAdvanced{ .scalar = int_ty.abiSize(target) };
+ assert(struct_obj.haveLayout());
+ return AbiSizeAdvanced{ .scalar = struct_obj.backing_int_ty.abiSize(target) };
},
else => {
switch (strat) {
@@ -3253,8 +3281,8 @@ pub const Type = extern union {
.array_u8 => return AbiSizeAdvanced{ .scalar = ty.castTag(.array_u8).?.data },
.array_u8_sentinel_0 => return AbiSizeAdvanced{ .scalar = ty.castTag(.array_u8_sentinel_0).?.data + 1 },
- .array, .vector => {
- const payload = ty.cast(Payload.Array).?.data;
+ .array => {
+ const payload = ty.castTag(.array).?.data;
switch (try payload.elem_type.abiSizeAdvanced(target, strat)) {
.scalar => |elem_size| return AbiSizeAdvanced{ .scalar = payload.len * elem_size },
.val => switch (strat) {
@@ -3276,6 +3304,28 @@ pub const Type = extern union {
}
},
+ .vector => {
+ const payload = ty.castTag(.vector).?.data;
+ const sema_kit = switch (strat) {
+ .sema_kit => |sk| sk,
+ .eager => null,
+ .lazy => |arena| return AbiSizeAdvanced{
+ .val = try Value.Tag.lazy_size.create(arena, ty),
+ },
+ };
+ const elem_bits = try payload.elem_type.bitSizeAdvanced(target, sema_kit);
+ const total_bits = elem_bits * payload.len;
+ const total_bytes = (total_bits + 7) / 8;
+ const alignment = switch (try ty.abiAlignmentAdvanced(target, strat)) {
+ .scalar => |x| x,
+ .val => return AbiSizeAdvanced{
+ .val = try Value.Tag.lazy_size.create(strat.lazy, ty),
+ },
+ };
+ const result = std.mem.alignForwardGeneric(u64, total_bytes, alignment);
+ return AbiSizeAdvanced{ .scalar = result };
+ },
+
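A worked example of the arithmetic above (element bit-width and alignment values assumed):

    const std = @import("std");

    test "vector ABI size arithmetic" {
        // @Vector(5, u16): 5 * 16 = 80 bits -> (80 + 7) / 8 = 10 bytes,
        // rounded up to the vector's ABI alignment (16 assumed here).
        const total_bytes: u64 = (16 * 5 + 7) / 8;
        try std.testing.expectEqual(@as(u64, 10), total_bytes);
        try std.testing.expectEqual(@as(u64, 16), std.mem.alignForwardGeneric(u64, total_bytes, 16));
    }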
.isize,
.usize,
.@"anyframe",
@@ -3319,7 +3369,13 @@ pub const Type = extern union {
.f128 => return AbiSizeAdvanced{ .scalar = 16 },
.f80 => switch (target.cpu.arch) {
- .i386 => return AbiSizeAdvanced{ .scalar = 12 },
+ .i386 => switch (target.os.tag) {
+ .windows => switch (target.abi) {
+ .msvc => return AbiSizeAdvanced{ .scalar = 16 },
+ else => return AbiSizeAdvanced{ .scalar = 12 },
+ },
+ else => return AbiSizeAdvanced{ .scalar = 12 },
+ },
.x86_64 => return AbiSizeAdvanced{ .scalar = 16 },
else => {
var payload: Payload.Bits = .{
@@ -4236,11 +4292,18 @@ pub const Type = extern union {
pub fn unionFieldType(ty: Type, enum_tag: Value, mod: *Module) Type {
const union_obj = ty.cast(Payload.Union).?.data;
- const index = union_obj.tag_ty.enumTagFieldIndex(enum_tag, mod).?;
+ const index = ty.unionTagFieldIndex(enum_tag, mod).?;
assert(union_obj.haveFieldTypes());
return union_obj.fields.values()[index].ty;
}
+ pub fn unionTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?usize {
+ const union_obj = ty.cast(Payload.Union).?.data;
+ const index = union_obj.tag_ty.enumTagFieldIndex(enum_tag, mod) orelse return null;
+ const name = union_obj.tag_ty.enumFieldName(index);
+ return union_obj.fields.getIndex(name);
+ }
+
pub fn unionHasAllZeroBitFieldTypes(ty: Type) bool {
return ty.cast(Payload.Union).?.data.hasAllZeroBitFieldTypes();
}
@@ -4530,6 +4593,12 @@ pub const Type = extern union {
.vector => ty = ty.castTag(.vector).?.data.elem_type,
+ .@"struct" => {
+ const struct_obj = ty.castTag(.@"struct").?.data;
+ assert(struct_obj.layout == .Packed);
+ ty = struct_obj.backing_int_ty;
+ },
+
else => unreachable,
};
}
@@ -4910,33 +4979,38 @@ pub const Type = extern union {
const s = ty.castTag(.@"struct").?.data;
assert(s.haveFieldTypes());
for (s.fields.values()) |field| {
- if (field.ty.onePossibleValue() == null) {
- return null;
- }
+ if (field.is_comptime) continue;
+ if (field.ty.onePossibleValue() != null) continue;
+ return null;
}
return Value.initTag(.empty_struct_value);
},
.tuple, .anon_struct => {
const tuple = ty.tupleFields();
- for (tuple.values) |val| {
- if (val.tag() == .unreachable_value) {
- return null; // non-comptime field
- }
+ for (tuple.values) |val, i| {
+ const is_comptime = val.tag() != .unreachable_value;
+ if (is_comptime) continue;
+ if (tuple.types[i].onePossibleValue() != null) continue;
+ return null;
}
return Value.initTag(.empty_struct_value);
},
.enum_numbered => {
const enum_numbered = ty.castTag(.enum_numbered).?.data;
- if (enum_numbered.fields.count() == 1) {
- return enum_numbered.values.keys()[0];
- } else {
+ // An explicit tag type is always provided for enum_numbered.
+ if (enum_numbered.tag_ty.hasRuntimeBits()) {
return null;
}
+ assert(enum_numbered.fields.count() == 1);
+ return enum_numbered.values.keys()[0];
},
.enum_full => {
const enum_full = ty.castTag(.enum_full).?.data;
+ if (enum_full.tag_ty.hasRuntimeBits()) {
+ return null;
+ }
if (enum_full.fields.count() == 1) {
if (enum_full.values.count() == 0) {
return Value.zero;
@@ -5271,7 +5345,8 @@ pub const Type = extern union {
.enum_numbered => return ty.castTag(.enum_numbered).?.data.tag_ty,
.enum_simple => {
const enum_simple = ty.castTag(.enum_simple).?.data;
- const bits = std.math.log2_int_ceil(usize, enum_simple.fields.count());
+ const field_count = enum_simple.fields.count();
+ const bits: u16 = if (field_count == 0) 0 else std.math.log2_int_ceil(usize, field_count);
buffer.* = .{
.base = .{ .tag = .int_unsigned },
.data = bits,
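The explicit zero-field case is needed because `std.math.log2_int_ceil` is only defined for positive arguments. The widths it yields for small field counts (worked values):

    const std = @import("std");

    test "enum_simple tag widths" {
        // 0 fields -> 0 bits (special-cased above); then:
        try std.testing.expectEqual(@as(u16, 0), std.math.log2_int_ceil(usize, 1));
        try std.testing.expectEqual(@as(u16, 1), std.math.log2_int_ceil(usize, 2));
        try std.testing.expectEqual(@as(u16, 2), std.math.log2_int_ceil(usize, 3));
        try std.testing.expectEqual(@as(u16, 2), std.math.log2_int_ceil(usize, 4));
    }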
@@ -5492,7 +5567,7 @@ pub const Type = extern union {
.@"struct" => {
const struct_obj = ty.castTag(.@"struct").?.data;
assert(struct_obj.layout != .Packed);
- return struct_obj.fields.values()[index].normalAlignment(target);
+ return struct_obj.fields.values()[index].alignment(target, struct_obj.layout);
},
.@"union", .union_safety_tagged, .union_tagged => {
const union_obj = ty.cast(Payload.Union).?.data;
@@ -5591,19 +5666,22 @@ pub const Type = extern union {
target: Target,
pub fn next(it: *StructOffsetIterator) ?FieldOffset {
- if (it.struct_obj.fields.count() <= it.field)
+ const i = it.field;
+ if (it.struct_obj.fields.count() <= i)
return null;
- const field = it.struct_obj.fields.values()[it.field];
- defer it.field += 1;
- if (!field.ty.hasRuntimeBits() or field.is_comptime)
- return FieldOffset{ .field = it.field, .offset = it.offset };
+ const field = it.struct_obj.fields.values()[i];
+ it.field += 1;
- const field_align = field.normalAlignment(it.target);
+ if (field.is_comptime or !field.ty.hasRuntimeBits()) {
+ return FieldOffset{ .field = i, .offset = it.offset };
+ }
+
+ const field_align = field.alignment(it.target, it.struct_obj.layout);
it.big_align = @maximum(it.big_align, field_align);
- it.offset = std.mem.alignForwardGeneric(u64, it.offset, field_align);
- defer it.offset += field.ty.abiSize(it.target);
- return FieldOffset{ .field = it.field, .offset = it.offset };
+ const field_offset = std.mem.alignForwardGeneric(u64, it.offset, field_align);
+ it.offset = field_offset + field.ty.abiSize(it.target);
+ return FieldOffset{ .field = i, .offset = field_offset };
}
};
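The restructured iterator returns the aligned offset of the current field and then advances past it. A hedged check of the arithmetic via `@offsetOf` on a C-layout struct:

    const std = @import("std");

    test "field offsets advance by aligned offset plus ABI size" {
        const S = extern struct { a: u8, b: u32, c: u8 };
        try std.testing.expectEqual(@as(usize, 0), @offsetOf(S, "a"));
        try std.testing.expectEqual(@as(usize, 4), @offsetOf(S, "b")); // aligned up from 1
        try std.testing.expectEqual(@as(usize, 8), @offsetOf(S, "c")); // 4 + @sizeOf(u32)
    }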
@@ -5771,50 +5849,6 @@ pub const Type = extern union {
}
}
- pub fn getNodeOffset(ty: Type) i32 {
- switch (ty.tag()) {
- .enum_full, .enum_nonexhaustive => {
- const enum_full = ty.cast(Payload.EnumFull).?.data;
- return enum_full.node_offset;
- },
- .enum_numbered => return ty.castTag(.enum_numbered).?.data.node_offset,
- .enum_simple => {
- const enum_simple = ty.castTag(.enum_simple).?.data;
- return enum_simple.node_offset;
- },
- .@"struct" => {
- const struct_obj = ty.castTag(.@"struct").?.data;
- return struct_obj.node_offset;
- },
- .error_set => {
- const error_set = ty.castTag(.error_set).?.data;
- return error_set.node_offset;
- },
- .@"union", .union_safety_tagged, .union_tagged => {
- const union_obj = ty.cast(Payload.Union).?.data;
- return union_obj.node_offset;
- },
- .@"opaque" => {
- const opaque_obj = ty.cast(Payload.Opaque).?.data;
- return opaque_obj.node_offset;
- },
- .atomic_order,
- .atomic_rmw_op,
- .calling_convention,
- .address_space,
- .float_mode,
- .reduce_op,
- .call_options,
- .prefetch_options,
- .export_options,
- .extern_options,
- .type_info,
- => unreachable, // These need to be resolved earlier.
-
- else => unreachable,
- }
- }
-
/// This enum does not directly correspond to `std.builtin.TypeId` because
/// it has extra enum tags in it, as a way of using less memory. For example,
/// even though Zig recognizes `*align(10) i32` and `*i32` both as Pointer types
@@ -6345,6 +6379,8 @@ pub const Type = extern union {
pub const @"anyopaque" = initTag(.anyopaque);
pub const @"null" = initTag(.@"null");
+ pub const err_int = Type.u16;
+
pub fn ptr(arena: Allocator, mod: *Module, data: Payload.Pointer.Data) !Type {
const target = mod.getTarget();
@@ -6535,6 +6571,11 @@ pub const CType = enum {
.long, .ulong => return 32,
.longlong, .ulonglong, .longdouble => return 64,
},
+ .avr => switch (self) {
+ .short, .ushort, .int, .uint => return 16,
+ .long, .ulong, .longdouble => return 32,
+ .longlong, .ulonglong => return 64,
+ },
else => switch (self) {
.short, .ushort => return 16,
.int, .uint => return 32,
@@ -6573,31 +6614,42 @@ pub const CType = enum {
.emscripten,
.plan9,
.solaris,
- => switch (self) {
- .short, .ushort => return 16,
- .int, .uint => return 32,
- .long, .ulong => return target.cpu.arch.ptrBitWidth(),
- .longlong, .ulonglong => return 64,
- .longdouble => switch (target.cpu.arch) {
- .i386, .x86_64 => return 80,
+ .haiku,
+ .ananas,
+ .fuchsia,
+ .minix,
+ => switch (target.cpu.arch) {
+ .avr => switch (self) {
+ .short, .ushort, .int, .uint => return 16,
+ .long, .ulong, .longdouble => return 32,
+ .longlong, .ulonglong => return 64,
+ },
+ else => switch (self) {
+ .short, .ushort => return 16,
+ .int, .uint => return 32,
+ .long, .ulong => return target.cpu.arch.ptrBitWidth(),
+ .longlong, .ulonglong => return 64,
+ .longdouble => switch (target.cpu.arch) {
+ .i386, .x86_64 => return 80,
- .riscv64,
- .aarch64,
- .aarch64_be,
- .aarch64_32,
- .s390x,
- .mips64,
- .mips64el,
- .sparc,
- .sparc64,
- .sparcel,
- .powerpc,
- .powerpcle,
- .powerpc64,
- .powerpc64le,
- => return 128,
+ .riscv64,
+ .aarch64,
+ .aarch64_be,
+ .aarch64_32,
+ .s390x,
+ .mips64,
+ .mips64el,
+ .sparc,
+ .sparc64,
+ .sparcel,
+ .powerpc,
+ .powerpcle,
+ .powerpc64,
+ .powerpc64le,
+ => return 128,
- else => return 64,
+ else => return 64,
+ },
},
},
@@ -6617,14 +6669,10 @@ pub const CType = enum {
},
},
- .ananas,
.cloudabi,
- .fuchsia,
.kfreebsd,
.lv2,
.zos,
- .haiku,
- .minix,
.rtems,
.nacl,
.aix,
diff --git a/src/value.zig b/src/value.zig
index 3994040ba6..a1961b40f7 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -1194,6 +1194,16 @@ pub const Value = extern union {
return switch (self.tag()) {
.bool_true, .one => true,
.bool_false, .zero => false,
+ .int_u64 => switch (self.castTag(.int_u64).?.data) {
+ 0 => false,
+ 1 => true,
+ else => unreachable,
+ },
+ .int_i64 => switch (self.castTag(.int_i64).?.data) {
+ 0 => false,
+ 1 => true,
+ else => unreachable,
+ },
else => unreachable,
};
}
@@ -1572,7 +1582,7 @@ pub const Value = extern union {
.one, .bool_true => return ty_bits - 1,
.int_u64 => {
- const big = @clz(u64, val.castTag(.int_u64).?.data);
+ const big = @clz(val.castTag(.int_u64).?.data);
return big + ty_bits - 64;
},
.int_i64 => {
@@ -1589,7 +1599,7 @@ pub const Value = extern union {
while (i != 0) {
i -= 1;
const limb = bigint.limbs[i];
- const this_limb_lz = @clz(std.math.big.Limb, limb);
+ const this_limb_lz = @clz(limb);
total_limb_lz += this_limb_lz;
if (this_limb_lz != bits_per_limb) break;
}
@@ -1616,7 +1626,7 @@ pub const Value = extern union {
.one, .bool_true => return 0,
.int_u64 => {
- const big = @ctz(u64, val.castTag(.int_u64).?.data);
+ const big = @ctz(val.castTag(.int_u64).?.data);
return if (big == 64) ty_bits else big;
},
.int_i64 => {
@@ -1628,7 +1638,7 @@ pub const Value = extern union {
// Limbs are stored in little-endian order.
var result: u64 = 0;
for (bigint.limbs) |limb| {
- const limb_tz = @ctz(std.math.big.Limb, limb);
+ const limb_tz = @ctz(limb);
result += limb_tz;
if (limb_tz != @sizeOf(std.math.big.Limb) * 8) break;
}
@@ -1653,7 +1663,7 @@ pub const Value = extern union {
.zero, .bool_false => return 0,
.one, .bool_true => return 1,
- .int_u64 => return @popCount(u64, val.castTag(.int_u64).?.data),
+ .int_u64 => return @popCount(val.castTag(.int_u64).?.data),
else => {
const info = ty.intInfo(target);
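These hunks, like many in the behavior tests below, follow the language change that dropped the redundant type parameter from the bit-counting builtins; the result type is now inferred from the operand (illustrative values):

    const std = @import("std");

    test "single-argument bit builtins" {
        var x: u32 = 0b100;
        try std.testing.expectEqual(@as(u6, 29), @clz(x));
        try std.testing.expectEqual(@as(u6, 2), @ctz(x));
        try std.testing.expectEqual(@as(u6, 1), @popCount(x));
    }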
@@ -1994,6 +2004,10 @@ pub const Value = extern union {
return (try orderAgainstZeroAdvanced(lhs, sema_kit)).compare(op);
}
+ pub fn eql(a: Value, b: Value, ty: Type, mod: *Module) bool {
+ return eqlAdvanced(a, ty, b, ty, mod, null) catch unreachable;
+ }
+
/// This function is used by hash maps and so treats floating-point NaNs as equal
/// to each other, and not equal to other floating-point values.
/// Similarly, it treats `undef` as a distinct value from all other values.
@@ -2002,13 +2016,10 @@ pub const Value = extern union {
/// for `a`. This function must act *as if* `a` has been coerced to `ty`. This complication
/// is required in order to make generic function instantiation efficient - specifically
/// the insertion into the monomorphized function table.
- pub fn eql(a: Value, b: Value, ty: Type, mod: *Module) bool {
- return eqlAdvanced(a, b, ty, mod, null) catch unreachable;
- }
-
/// If `null` is provided for `sema_kit` then it is guaranteed no error will be returned.
pub fn eqlAdvanced(
a: Value,
+ a_ty: Type,
b: Value,
ty: Type,
mod: *Module,
@@ -2034,33 +2045,34 @@ pub const Value = extern union {
const a_payload = a.castTag(.opt_payload).?.data;
const b_payload = b.castTag(.opt_payload).?.data;
var buffer: Type.Payload.ElemType = undefined;
- return eqlAdvanced(a_payload, b_payload, ty.optionalChild(&buffer), mod, sema_kit);
+ const payload_ty = ty.optionalChild(&buffer);
+ return eqlAdvanced(a_payload, payload_ty, b_payload, payload_ty, mod, sema_kit);
},
.slice => {
const a_payload = a.castTag(.slice).?.data;
const b_payload = b.castTag(.slice).?.data;
- if (!(try eqlAdvanced(a_payload.len, b_payload.len, Type.usize, mod, sema_kit))) {
+ if (!(try eqlAdvanced(a_payload.len, Type.usize, b_payload.len, Type.usize, mod, sema_kit))) {
return false;
}
var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_ty = ty.slicePtrFieldType(&ptr_buf);
- return eqlAdvanced(a_payload.ptr, b_payload.ptr, ptr_ty, mod, sema_kit);
+ return eqlAdvanced(a_payload.ptr, ptr_ty, b_payload.ptr, ptr_ty, mod, sema_kit);
},
.elem_ptr => {
const a_payload = a.castTag(.elem_ptr).?.data;
const b_payload = b.castTag(.elem_ptr).?.data;
if (a_payload.index != b_payload.index) return false;
- return eqlAdvanced(a_payload.array_ptr, b_payload.array_ptr, ty, mod, sema_kit);
+ return eqlAdvanced(a_payload.array_ptr, ty, b_payload.array_ptr, ty, mod, sema_kit);
},
.field_ptr => {
const a_payload = a.castTag(.field_ptr).?.data;
const b_payload = b.castTag(.field_ptr).?.data;
if (a_payload.field_index != b_payload.field_index) return false;
- return eqlAdvanced(a_payload.container_ptr, b_payload.container_ptr, ty, mod, sema_kit);
+ return eqlAdvanced(a_payload.container_ptr, ty, b_payload.container_ptr, ty, mod, sema_kit);
},
.@"error" => {
const a_name = a.castTag(.@"error").?.data.name;
@@ -2070,7 +2082,8 @@ pub const Value = extern union {
.eu_payload => {
const a_payload = a.castTag(.eu_payload).?.data;
const b_payload = b.castTag(.eu_payload).?.data;
- return eqlAdvanced(a_payload, b_payload, ty.errorUnionPayload(), mod, sema_kit);
+ const payload_ty = ty.errorUnionPayload();
+ return eqlAdvanced(a_payload, payload_ty, b_payload, payload_ty, mod, sema_kit);
},
.eu_payload_ptr => @panic("TODO: Implement more pointer eql cases"),
.opt_payload_ptr => @panic("TODO: Implement more pointer eql cases"),
@@ -2088,7 +2101,7 @@ pub const Value = extern union {
const types = ty.tupleFields().types;
assert(types.len == a_field_vals.len);
for (types) |field_ty, i| {
- if (!(try eqlAdvanced(a_field_vals[i], b_field_vals[i], field_ty, mod, sema_kit))) {
+ if (!(try eqlAdvanced(a_field_vals[i], field_ty, b_field_vals[i], field_ty, mod, sema_kit))) {
return false;
}
}
@@ -2099,7 +2112,7 @@ pub const Value = extern union {
const fields = ty.structFields().values();
assert(fields.len == a_field_vals.len);
for (fields) |field, i| {
- if (!(try eqlAdvanced(a_field_vals[i], b_field_vals[i], field.ty, mod, sema_kit))) {
+ if (!(try eqlAdvanced(a_field_vals[i], field.ty, b_field_vals[i], field.ty, mod, sema_kit))) {
return false;
}
}
@@ -2110,7 +2123,7 @@ pub const Value = extern union {
for (a_field_vals) |a_elem, i| {
const b_elem = b_field_vals[i];
- if (!(try eqlAdvanced(a_elem, b_elem, elem_ty, mod, sema_kit))) {
+ if (!(try eqlAdvanced(a_elem, elem_ty, b_elem, elem_ty, mod, sema_kit))) {
return false;
}
}
@@ -2122,7 +2135,7 @@ pub const Value = extern union {
switch (ty.containerLayout()) {
.Packed, .Extern => {
const tag_ty = ty.unionTagTypeHypothetical();
- if (!(try a_union.tag.eqlAdvanced(b_union.tag, tag_ty, mod, sema_kit))) {
+ if (!(try eqlAdvanced(a_union.tag, tag_ty, b_union.tag, tag_ty, mod, sema_kit))) {
// In this case, we must disregard mismatching tags and compare
// based on the in-memory bytes of the payloads.
@panic("TODO comptime comparison of extern union values with mismatching tags");
@@ -2130,13 +2143,13 @@ pub const Value = extern union {
},
.Auto => {
const tag_ty = ty.unionTagTypeHypothetical();
- if (!(try a_union.tag.eqlAdvanced(b_union.tag, tag_ty, mod, sema_kit))) {
+ if (!(try eqlAdvanced(a_union.tag, tag_ty, b_union.tag, tag_ty, mod, sema_kit))) {
return false;
}
},
}
const active_field_ty = ty.unionFieldType(a_union.tag, mod);
- return a_union.val.eqlAdvanced(b_union.val, active_field_ty, mod, sema_kit);
+ return eqlAdvanced(a_union.val, active_field_ty, b_union.val, active_field_ty, mod, sema_kit);
},
else => {},
} else if (a_tag == .null_value or b_tag == .null_value) {
@@ -2170,7 +2183,7 @@ pub const Value = extern union {
const b_val = b.enumToInt(ty, &buf_b);
var buf_ty: Type.Payload.Bits = undefined;
const int_ty = ty.intTagType(&buf_ty);
- return eqlAdvanced(a_val, b_val, int_ty, mod, sema_kit);
+ return eqlAdvanced(a_val, int_ty, b_val, int_ty, mod, sema_kit);
},
.Array, .Vector => {
const len = ty.arrayLen();
@@ -2181,17 +2194,44 @@ pub const Value = extern union {
while (i < len) : (i += 1) {
const a_elem = elemValueBuffer(a, mod, i, &a_buf);
const b_elem = elemValueBuffer(b, mod, i, &b_buf);
- if (!(try eqlAdvanced(a_elem, b_elem, elem_ty, mod, sema_kit))) {
+ if (!(try eqlAdvanced(a_elem, elem_ty, b_elem, elem_ty, mod, sema_kit))) {
return false;
}
}
return true;
},
.Struct => {
- // A tuple can be represented with .empty_struct_value,
- // the_one_possible_value, .aggregate in which case we could
- // end up here and the values are equal if the type has zero fields.
- return ty.isTupleOrAnonStruct() and ty.structFieldCount() != 0;
+ // A struct can be represented with one of:
+ // .empty_struct_value,
+ // .the_one_possible_value,
+ // .aggregate.
+ // Note that we already checked above for matching tags, e.g. both .aggregate.
+ return ty.onePossibleValue() != null;
+ },
+ .Union => {
+ // Here we have to check for value equality, as if `a` had been coerced to `ty`.
+ if (ty.onePossibleValue() != null) {
+ return true;
+ }
+ if (a_ty.castTag(.anon_struct)) |payload| {
+ const tuple = payload.data;
+ if (tuple.values.len != 1) {
+ return false;
+ }
+ const field_name = tuple.names[0];
+ const union_obj = ty.cast(Type.Payload.Union).?.data;
+ const field_index = union_obj.fields.getIndex(field_name) orelse return false;
+ const tag_and_val = b.castTag(.@"union").?.data;
+ var field_tag_buf: Value.Payload.U32 = .{
+ .base = .{ .tag = .enum_field_index },
+ .data = @intCast(u32, field_index),
+ };
+ const field_tag = Value.initPayload(&field_tag_buf.base);
+ const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, mod);
+ if (!tag_matches) return false;
+ return eqlAdvanced(tag_and_val.val, union_obj.tag_ty, tuple.values[0], tuple.types[0], mod, sema_kit);
+ }
+ return false;
},
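This branch models the language-level coercion of a single-field anonymous struct literal to a tagged union. A hedged behavior-level sketch of the same rule:

    const std = @import("std");

    test "anon struct literal coerces to a union" {
        const U = union(enum) { a: u32, b: bool };
        // One field, whose name matches a union tag: coercion succeeds and
        // value equality holds field-wise.
        const u: U = .{ .a = 1 };
        try std.testing.expect(u == .a);
        try std.testing.expect(u.a == 1);
    }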
.Float => {
switch (ty.floatBits(target)) {
@@ -2220,7 +2260,8 @@ pub const Value = extern union {
.base = .{ .tag = .opt_payload },
.data = a,
};
- return eqlAdvanced(Value.initPayload(&buffer.base), b, ty, mod, sema_kit);
+ const opt_val = Value.initPayload(&buffer.base);
+ return eqlAdvanced(opt_val, ty, b, ty, mod, sema_kit);
}
},
else => {},
@@ -2648,6 +2689,12 @@ pub const Value = extern union {
// to have only one possible value itself.
.the_only_possible_value => return val,
+ // An integer address that was cast to a pointer to an array.
+ .int_u64, .int_i64 => {
+ assert(index == 0);
+ return val;
+ },
+
else => unreachable,
}
}
@@ -3472,44 +3519,6 @@ pub const Value = extern union {
return fromBigInt(allocator, result_q.toConst());
}
- pub fn intRem(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
- if (ty.zigTypeTag() == .Vector) {
- const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
- scalar.* = try intRemScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
- }
- return Value.Tag.aggregate.create(allocator, result_data);
- }
- return intRemScalar(lhs, rhs, allocator, target);
- }
-
- pub fn intRemScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value {
- // TODO is this a performance issue? maybe we should try the operation without
- // resorting to BigInt first.
- var lhs_space: Value.BigIntSpace = undefined;
- var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, target);
- const rhs_bigint = rhs.toBigInt(&rhs_space, target);
- const limbs_q = try allocator.alloc(
- std.math.big.Limb,
- lhs_bigint.limbs.len,
- );
- const limbs_r = try allocator.alloc(
- std.math.big.Limb,
- // TODO: consider reworking Sema to re-use Values rather than
- // always producing new Value objects.
- rhs_bigint.limbs.len,
- );
- const limbs_buffer = try allocator.alloc(
- std.math.big.Limb,
- std.math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
- );
- var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
- var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
- result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
- return fromBigInt(allocator, result_r.toConst());
- }
-
pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
diff --git a/test/behavior.zig b/test/behavior.zig
index 813c410f7b..8f581f372e 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -26,7 +26,6 @@ test {
_ = @import("behavior/bugs/920.zig");
_ = @import("behavior/bugs/1025.zig");
_ = @import("behavior/bugs/1076.zig");
- _ = @import("behavior/bugs/1111.zig");
_ = @import("behavior/bugs/1277.zig");
_ = @import("behavior/bugs/1310.zig");
_ = @import("behavior/bugs/1381.zig");
@@ -84,6 +83,8 @@ test {
_ = @import("behavior/bugs/11213.zig");
_ = @import("behavior/bugs/12003.zig");
_ = @import("behavior/bugs/12033.zig");
+ _ = @import("behavior/bugs/12430.zig");
+ _ = @import("behavior/bugs/12486.zig");
_ = @import("behavior/byteswap.zig");
_ = @import("behavior/byval_arg_var.zig");
_ = @import("behavior/call.zig");
@@ -159,12 +160,14 @@ test {
_ = @import("behavior/while.zig");
_ = @import("behavior/widening.zig");
- if (builtin.stage2_arch == .wasm32) {
+ if (builtin.cpu.arch == .wasm32) {
_ = @import("behavior/wasm.zig");
}
if (builtin.zig_backend != .stage1) {
_ = @import("behavior/decltest.zig");
+ _ = @import("behavior/packed_struct_explicit_backing_int.zig");
+ _ = @import("behavior/empty_union.zig");
}
if (builtin.os.tag != .wasi) {
diff --git a/test/behavior/align.zig b/test/behavior/align.zig
index 26e3d91373..ad857fb9c2 100644
--- a/test/behavior/align.zig
+++ b/test/behavior/align.zig
@@ -100,8 +100,8 @@ test "alignment and size of structs with 128-bit fields" {
.a_align = 8,
.a_size = 16,
- .b_align = 8,
- .b_size = 24,
+ .b_align = 16,
+ .b_size = 32,
.u128_align = 8,
.u128_size = 16,
@@ -114,8 +114,8 @@ test "alignment and size of structs with 128-bit fields" {
.a_align = 8,
.a_size = 16,
- .b_align = 8,
- .b_size = 24,
+ .b_align = 16,
+ .b_size = 32,
.u128_align = 8,
.u128_size = 16,
@@ -126,8 +126,8 @@ test "alignment and size of structs with 128-bit fields" {
.a_align = 4,
.a_size = 16,
- .b_align = 4,
- .b_size = 20,
+ .b_align = 16,
+ .b_size = 32,
.u128_align = 4,
.u128_size = 16,
@@ -140,12 +140,39 @@ test "alignment and size of structs with 128-bit fields" {
.mips64el,
.powerpc64,
.powerpc64le,
- .riscv64,
.sparc64,
.x86_64,
+ => switch (builtin.object_format) {
+ .c => .{
+ .a_align = 16,
+ .a_size = 16,
+
+ .b_align = 16,
+ .b_size = 32,
+
+ .u128_align = 16,
+ .u128_size = 16,
+ .u129_align = 16,
+ .u129_size = 32,
+ },
+ else => .{
+ .a_align = 8,
+ .a_size = 16,
+
+ .b_align = 16,
+ .b_size = 32,
+
+ .u128_align = 8,
+ .u128_size = 16,
+ .u129_align = 8,
+ .u129_size = 24,
+ },
+ },
+
.aarch64,
.aarch64_be,
.aarch64_32,
+ .riscv64,
.bpfel,
.bpfeb,
.nvptx,
@@ -166,17 +193,17 @@ test "alignment and size of structs with 128-bit fields" {
else => return error.SkipZigTest,
};
comptime {
- std.debug.assert(@alignOf(A) == expected.a_align);
- std.debug.assert(@sizeOf(A) == expected.a_size);
+ assert(@alignOf(A) == expected.a_align);
+ assert(@sizeOf(A) == expected.a_size);
- std.debug.assert(@alignOf(B) == expected.b_align);
- std.debug.assert(@sizeOf(B) == expected.b_size);
+ assert(@alignOf(B) == expected.b_align);
+ assert(@sizeOf(B) == expected.b_size);
- std.debug.assert(@alignOf(u128) == expected.u128_align);
- std.debug.assert(@sizeOf(u128) == expected.u128_size);
+ assert(@alignOf(u128) == expected.u128_align);
+ assert(@sizeOf(u128) == expected.u128_size);
- std.debug.assert(@alignOf(u129) == expected.u129_align);
- std.debug.assert(@sizeOf(u129) == expected.u129_size);
+ assert(@alignOf(u129) == expected.u129_align);
+ assert(@sizeOf(u129) == expected.u129_size);
}
}
diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig
index 1a1412420a..4d8b176fbf 100644
--- a/test/behavior/basic.zig
+++ b/test/behavior/basic.zig
@@ -1104,3 +1104,24 @@ test "namespace lookup ignores decl causing the lookup" {
};
_ = S.foo();
}
+
+test "ambiguous reference error ignores current declaration" {
+ const S = struct {
+ const foo = 666;
+
+ const a = @This();
+ const b = struct {
+ const foo = a.foo;
+ const bar = struct {
+ bar: u32 = b.foo,
+ };
+
+ comptime {
+ _ = b.foo;
+ }
+ };
+
+ usingnamespace b;
+ };
+ try expect(S.b.foo == 666);
+}
diff --git a/test/behavior/bitreverse.zig b/test/behavior/bitreverse.zig
index 585fe381b0..092f3a1fed 100644
--- a/test/behavior/bitreverse.zig
+++ b/test/behavior/bitreverse.zig
@@ -8,7 +8,7 @@ test "@bitReverse large exotic integer" {
// Currently failing on stage1 for big-endian targets
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
- try expect(@bitReverse(u95, @as(u95, 0x123456789abcdef111213141)) == 0x4146424447bd9eac8f351624);
+ try expect(@bitReverse(@as(u95, 0x123456789abcdef111213141)) == 0x4146424447bd9eac8f351624);
}
test "@bitReverse" {
@@ -23,74 +23,74 @@ test "@bitReverse" {
fn testBitReverse() !void {
// using comptime_ints, unsigned
- try expect(@bitReverse(u0, @as(u0, 0)) == 0);
- try expect(@bitReverse(u5, @as(u5, 0x12)) == 0x9);
- try expect(@bitReverse(u8, @as(u8, 0x12)) == 0x48);
- try expect(@bitReverse(u16, @as(u16, 0x1234)) == 0x2c48);
- try expect(@bitReverse(u24, @as(u24, 0x123456)) == 0x6a2c48);
- try expect(@bitReverse(u32, @as(u32, 0x12345678)) == 0x1e6a2c48);
- try expect(@bitReverse(u40, @as(u40, 0x123456789a)) == 0x591e6a2c48);
- try expect(@bitReverse(u48, @as(u48, 0x123456789abc)) == 0x3d591e6a2c48);
- try expect(@bitReverse(u56, @as(u56, 0x123456789abcde)) == 0x7b3d591e6a2c48);
- try expect(@bitReverse(u64, @as(u64, 0x123456789abcdef1)) == 0x8f7b3d591e6a2c48);
- try expect(@bitReverse(u96, @as(u96, 0x123456789abcdef111213141)) == 0x828c84888f7b3d591e6a2c48);
- try expect(@bitReverse(u128, @as(u128, 0x123456789abcdef11121314151617181)) == 0x818e868a828c84888f7b3d591e6a2c48);
+ try expect(@bitReverse(@as(u0, 0)) == 0);
+ try expect(@bitReverse(@as(u5, 0x12)) == 0x9);
+ try expect(@bitReverse(@as(u8, 0x12)) == 0x48);
+ try expect(@bitReverse(@as(u16, 0x1234)) == 0x2c48);
+ try expect(@bitReverse(@as(u24, 0x123456)) == 0x6a2c48);
+ try expect(@bitReverse(@as(u32, 0x12345678)) == 0x1e6a2c48);
+ try expect(@bitReverse(@as(u40, 0x123456789a)) == 0x591e6a2c48);
+ try expect(@bitReverse(@as(u48, 0x123456789abc)) == 0x3d591e6a2c48);
+ try expect(@bitReverse(@as(u56, 0x123456789abcde)) == 0x7b3d591e6a2c48);
+ try expect(@bitReverse(@as(u64, 0x123456789abcdef1)) == 0x8f7b3d591e6a2c48);
+ try expect(@bitReverse(@as(u96, 0x123456789abcdef111213141)) == 0x828c84888f7b3d591e6a2c48);
+ try expect(@bitReverse(@as(u128, 0x123456789abcdef11121314151617181)) == 0x818e868a828c84888f7b3d591e6a2c48);
// using runtime uints, unsigned
var num0: u0 = 0;
- try expect(@bitReverse(u0, num0) == 0);
+ try expect(@bitReverse(num0) == 0);
var num5: u5 = 0x12;
- try expect(@bitReverse(u5, num5) == 0x9);
+ try expect(@bitReverse(num5) == 0x9);
var num8: u8 = 0x12;
- try expect(@bitReverse(u8, num8) == 0x48);
+ try expect(@bitReverse(num8) == 0x48);
var num16: u16 = 0x1234;
- try expect(@bitReverse(u16, num16) == 0x2c48);
+ try expect(@bitReverse(num16) == 0x2c48);
var num24: u24 = 0x123456;
- try expect(@bitReverse(u24, num24) == 0x6a2c48);
+ try expect(@bitReverse(num24) == 0x6a2c48);
var num32: u32 = 0x12345678;
- try expect(@bitReverse(u32, num32) == 0x1e6a2c48);
+ try expect(@bitReverse(num32) == 0x1e6a2c48);
var num40: u40 = 0x123456789a;
- try expect(@bitReverse(u40, num40) == 0x591e6a2c48);
+ try expect(@bitReverse(num40) == 0x591e6a2c48);
var num48: u48 = 0x123456789abc;
- try expect(@bitReverse(u48, num48) == 0x3d591e6a2c48);
+ try expect(@bitReverse(num48) == 0x3d591e6a2c48);
var num56: u56 = 0x123456789abcde;
- try expect(@bitReverse(u56, num56) == 0x7b3d591e6a2c48);
+ try expect(@bitReverse(num56) == 0x7b3d591e6a2c48);
var num64: u64 = 0x123456789abcdef1;
- try expect(@bitReverse(u64, num64) == 0x8f7b3d591e6a2c48);
+ try expect(@bitReverse(num64) == 0x8f7b3d591e6a2c48);
var num128: u128 = 0x123456789abcdef11121314151617181;
- try expect(@bitReverse(u128, num128) == 0x818e868a828c84888f7b3d591e6a2c48);
+ try expect(@bitReverse(num128) == 0x818e868a828c84888f7b3d591e6a2c48);
// using comptime_ints, signed, positive
- try expect(@bitReverse(u8, @as(u8, 0)) == 0);
- try expect(@bitReverse(i8, @bitCast(i8, @as(u8, 0x92))) == @bitCast(i8, @as(u8, 0x49)));
- try expect(@bitReverse(i16, @bitCast(i16, @as(u16, 0x1234))) == @bitCast(i16, @as(u16, 0x2c48)));
- try expect(@bitReverse(i24, @bitCast(i24, @as(u24, 0x123456))) == @bitCast(i24, @as(u24, 0x6a2c48)));
- try expect(@bitReverse(i24, @bitCast(i24, @as(u24, 0x12345f))) == @bitCast(i24, @as(u24, 0xfa2c48)));
- try expect(@bitReverse(i24, @bitCast(i24, @as(u24, 0xf23456))) == @bitCast(i24, @as(u24, 0x6a2c4f)));
- try expect(@bitReverse(i32, @bitCast(i32, @as(u32, 0x12345678))) == @bitCast(i32, @as(u32, 0x1e6a2c48)));
- try expect(@bitReverse(i32, @bitCast(i32, @as(u32, 0xf2345678))) == @bitCast(i32, @as(u32, 0x1e6a2c4f)));
- try expect(@bitReverse(i32, @bitCast(i32, @as(u32, 0x1234567f))) == @bitCast(i32, @as(u32, 0xfe6a2c48)));
- try expect(@bitReverse(i40, @bitCast(i40, @as(u40, 0x123456789a))) == @bitCast(i40, @as(u40, 0x591e6a2c48)));
- try expect(@bitReverse(i48, @bitCast(i48, @as(u48, 0x123456789abc))) == @bitCast(i48, @as(u48, 0x3d591e6a2c48)));
- try expect(@bitReverse(i56, @bitCast(i56, @as(u56, 0x123456789abcde))) == @bitCast(i56, @as(u56, 0x7b3d591e6a2c48)));
- try expect(@bitReverse(i64, @bitCast(i64, @as(u64, 0x123456789abcdef1))) == @bitCast(i64, @as(u64, 0x8f7b3d591e6a2c48)));
- try expect(@bitReverse(i96, @bitCast(i96, @as(u96, 0x123456789abcdef111213141))) == @bitCast(i96, @as(u96, 0x828c84888f7b3d591e6a2c48)));
- try expect(@bitReverse(i128, @bitCast(i128, @as(u128, 0x123456789abcdef11121314151617181))) == @bitCast(i128, @as(u128, 0x818e868a828c84888f7b3d591e6a2c48)));
+ try expect(@bitReverse(@as(u8, 0)) == 0);
+ try expect(@bitReverse(@bitCast(i8, @as(u8, 0x92))) == @bitCast(i8, @as(u8, 0x49)));
+ try expect(@bitReverse(@bitCast(i16, @as(u16, 0x1234))) == @bitCast(i16, @as(u16, 0x2c48)));
+ try expect(@bitReverse(@bitCast(i24, @as(u24, 0x123456))) == @bitCast(i24, @as(u24, 0x6a2c48)));
+ try expect(@bitReverse(@bitCast(i24, @as(u24, 0x12345f))) == @bitCast(i24, @as(u24, 0xfa2c48)));
+ try expect(@bitReverse(@bitCast(i24, @as(u24, 0xf23456))) == @bitCast(i24, @as(u24, 0x6a2c4f)));
+ try expect(@bitReverse(@bitCast(i32, @as(u32, 0x12345678))) == @bitCast(i32, @as(u32, 0x1e6a2c48)));
+ try expect(@bitReverse(@bitCast(i32, @as(u32, 0xf2345678))) == @bitCast(i32, @as(u32, 0x1e6a2c4f)));
+ try expect(@bitReverse(@bitCast(i32, @as(u32, 0x1234567f))) == @bitCast(i32, @as(u32, 0xfe6a2c48)));
+ try expect(@bitReverse(@bitCast(i40, @as(u40, 0x123456789a))) == @bitCast(i40, @as(u40, 0x591e6a2c48)));
+ try expect(@bitReverse(@bitCast(i48, @as(u48, 0x123456789abc))) == @bitCast(i48, @as(u48, 0x3d591e6a2c48)));
+ try expect(@bitReverse(@bitCast(i56, @as(u56, 0x123456789abcde))) == @bitCast(i56, @as(u56, 0x7b3d591e6a2c48)));
+ try expect(@bitReverse(@bitCast(i64, @as(u64, 0x123456789abcdef1))) == @bitCast(i64, @as(u64, 0x8f7b3d591e6a2c48)));
+ try expect(@bitReverse(@bitCast(i96, @as(u96, 0x123456789abcdef111213141))) == @bitCast(i96, @as(u96, 0x828c84888f7b3d591e6a2c48)));
+ try expect(@bitReverse(@bitCast(i128, @as(u128, 0x123456789abcdef11121314151617181))) == @bitCast(i128, @as(u128, 0x818e868a828c84888f7b3d591e6a2c48)));
// using signed, negative. Compare to runtime ints returned from llvm.
var neg8: i8 = -18;
- try expect(@bitReverse(i8, @as(i8, -18)) == @bitReverse(i8, neg8));
+ try expect(@bitReverse(@as(i8, -18)) == @bitReverse(neg8));
var neg16: i16 = -32694;
- try expect(@bitReverse(i16, @as(i16, -32694)) == @bitReverse(i16, neg16));
+ try expect(@bitReverse(@as(i16, -32694)) == @bitReverse(neg16));
var neg24: i24 = -6773785;
- try expect(@bitReverse(i24, @as(i24, -6773785)) == @bitReverse(i24, neg24));
+ try expect(@bitReverse(@as(i24, -6773785)) == @bitReverse(neg24));
var neg32: i32 = -16773785;
- try expect(@bitReverse(i32, @as(i32, -16773785)) == @bitReverse(i32, neg32));
+ try expect(@bitReverse(@as(i32, -16773785)) == @bitReverse(neg32));
}
fn vector8() !void {
var v = @Vector(2, u8){ 0x12, 0x23 };
- var result = @bitReverse(u8, v);
+ var result = @bitReverse(v);
try expect(result[0] == 0x48);
try expect(result[1] == 0xc4);
}
@@ -109,7 +109,7 @@ test "bitReverse vectors u8" {
fn vector16() !void {
var v = @Vector(2, u16){ 0x1234, 0x2345 };
- var result = @bitReverse(u16, v);
+ var result = @bitReverse(v);
try expect(result[0] == 0x2c48);
try expect(result[1] == 0xa2c4);
}
@@ -128,7 +128,7 @@ test "bitReverse vectors u16" {
fn vector24() !void {
var v = @Vector(2, u24){ 0x123456, 0x234567 };
- var result = @bitReverse(u24, v);
+ var result = @bitReverse(v);
try expect(result[0] == 0x6a2c48);
try expect(result[1] == 0xe6a2c4);
}
@@ -147,7 +147,7 @@ test "bitReverse vectors u24" {
fn vector0() !void {
var v = @Vector(2, u0){ 0, 0 };
- var result = @bitReverse(u0, v);
+ var result = @bitReverse(v);
try expect(result[0] == 0);
try expect(result[1] == 0);
}
diff --git a/test/behavior/bugs/10147.zig b/test/behavior/bugs/10147.zig
index b519c71865..221120bb73 100644
--- a/test/behavior/bugs/10147.zig
+++ b/test/behavior/bugs/10147.zig
@@ -2,6 +2,7 @@ const builtin = @import("builtin");
const std = @import("std");
test "uses correct LLVM builtin" {
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -12,8 +13,8 @@ test "uses correct LLVM builtin" {
var y: @Vector(4, u32) = [_]u32{ 0x1, 0x1, 0x1, 0x1 };
// The stage1 compiler used to call the same builtin function for both
// scalar and vector inputs, causing the LLVM module verification to fail.
- var a = @clz(u32, x);
- var b = @clz(u32, y);
+ var a = @clz(x);
+ var b = @clz(y);
try std.testing.expectEqual(@as(u6, 31), a);
try std.testing.expectEqual([_]u6{ 31, 31, 31, 31 }, b);
}
diff --git a/test/behavior/bugs/1111.zig b/test/behavior/bugs/1111.zig
deleted file mode 100644
index d274befaf3..0000000000
--- a/test/behavior/bugs/1111.zig
+++ /dev/null
@@ -1,11 +0,0 @@
-const Foo = enum(c_int) {
- Bar = -1,
-};
-
-test "issue 1111 fixed" {
- const v = Foo.Bar;
-
- switch (v) {
- Foo.Bar => return,
- }
-}
diff --git a/test/behavior/bugs/12430.zig b/test/behavior/bugs/12430.zig
new file mode 100644
index 0000000000..cbf1658b1c
--- /dev/null
+++ b/test/behavior/bugs/12430.zig
@@ -0,0 +1,11 @@
+const std = @import("std");
+
+test {
+ const T = comptime b: {
+ break :b @Type(.{ .Int = .{
+ .signedness = .unsigned,
+ .bits = 8,
+ } });
+ };
+ try std.testing.expect(T == u8);
+}
diff --git a/test/behavior/bugs/12486.zig b/test/behavior/bugs/12486.zig
new file mode 100644
index 0000000000..f0d357efd1
--- /dev/null
+++ b/test/behavior/bugs/12486.zig
@@ -0,0 +1,49 @@
+const SomeEnum = union(enum) {
+ EnumVariant: u8,
+};
+
+const SomeStruct = struct {
+ struct_field: u8,
+};
+
+const OptEnum = struct {
+ opt_enum: ?SomeEnum,
+};
+
+const ErrEnum = struct {
+ err_enum: anyerror!SomeEnum,
+};
+
+const OptStruct = struct {
+ opt_struct: ?SomeStruct,
+};
+
+const ErrStruct = struct {
+ err_struct: anyerror!SomeStruct,
+};
+
+test {
+ _ = OptEnum{
+ .opt_enum = .{
+ .EnumVariant = 1,
+ },
+ };
+
+ _ = ErrEnum{
+ .err_enum = .{
+ .EnumVariant = 1,
+ },
+ };
+
+ _ = OptStruct{
+ .opt_struct = .{
+ .struct_field = 1,
+ },
+ };
+
+ _ = ErrStruct{
+ .err_struct = .{
+ .struct_field = 1,
+ },
+ };
+}
diff --git a/test/behavior/bugs/2114.zig b/test/behavior/bugs/2114.zig
index 54826cb0d1..a013688f4e 100644
--- a/test/behavior/bugs/2114.zig
+++ b/test/behavior/bugs/2114.zig
@@ -4,7 +4,7 @@ const expect = std.testing.expect;
const math = std.math;
fn ctz(x: anytype) usize {
- return @ctz(@TypeOf(x), x);
+ return @ctz(x);
}
test "fixed" {
diff --git a/test/behavior/byteswap.zig b/test/behavior/byteswap.zig
index 263f2c046e..5313b612b0 100644
--- a/test/behavior/byteswap.zig
+++ b/test/behavior/byteswap.zig
@@ -3,6 +3,7 @@ const builtin = @import("builtin");
const expect = std.testing.expect;
test "@byteSwap integers" {
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
@@ -46,7 +47,7 @@ test "@byteSwap integers" {
);
}
fn t(comptime I: type, input: I, expected_output: I) !void {
- try std.testing.expect(expected_output == @byteSwap(I, input));
+ try std.testing.expect(expected_output == @byteSwap(input));
}
};
comptime try ByteSwapIntTest.run();
@@ -55,12 +56,13 @@ test "@byteSwap integers" {
fn vector8() !void {
var v = @Vector(2, u8){ 0x12, 0x13 };
- var result = @byteSwap(u8, v);
+ var result = @byteSwap(v);
try expect(result[0] == 0x12);
try expect(result[1] == 0x13);
}
test "@byteSwap vectors u8" {
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -73,12 +75,13 @@ test "@byteSwap vectors u8" {
fn vector16() !void {
var v = @Vector(2, u16){ 0x1234, 0x2345 };
- var result = @byteSwap(u16, v);
+ var result = @byteSwap(v);
try expect(result[0] == 0x3412);
try expect(result[1] == 0x4523);
}
test "@byteSwap vectors u16" {
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -91,12 +94,13 @@ test "@byteSwap vectors u16" {
fn vector24() !void {
var v = @Vector(2, u24){ 0x123456, 0x234567 };
- var result = @byteSwap(u24, v);
+ var result = @byteSwap(v);
try expect(result[0] == 0x563412);
try expect(result[1] == 0x674523);
}
test "@byteSwap vectors u24" {
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -109,12 +113,13 @@ test "@byteSwap vectors u24" {
fn vector0() !void {
var v = @Vector(2, u0){ 0, 0 };
- var result = @byteSwap(u0, v);
+ var result = @byteSwap(v);
try expect(result[0] == 0);
try expect(result[1] == 0);
}
test "@byteSwap vectors u0" {
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
diff --git a/test/behavior/call.zig b/test/behavior/call.zig
index 0297582234..eafd2ef4e9 100644
--- a/test/behavior/call.zig
+++ b/test/behavior/call.zig
@@ -246,3 +246,18 @@ test "function call with 40 arguments" {
};
try S.doTheTest(39);
}
+
+test "arguments to comptime parameters generated in comptime blocks" {
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
+
+ const S = struct {
+ fn fortyTwo() i32 {
+ return 42;
+ }
+
+ fn foo(comptime x: i32) void {
+ if (x != 42) @compileError("bad");
+ }
+ };
+ S.foo(S.fortyTwo());
+}
diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig
index e899def4aa..fa0877258c 100644
--- a/test/behavior/cast.zig
+++ b/test/behavior/cast.zig
@@ -1281,7 +1281,7 @@ test "*const [N]null u8 to ?[]const u8" {
test "cast between [*c]T and ?[*:0]T on fn parameter" {
const S = struct {
const Handler = ?fn ([*c]const u8) callconv(.C) void;
- fn addCallback(handler: Handler) void {
+ fn addCallback(comptime handler: Handler) void {
_ = handler;
}
@@ -1431,6 +1431,11 @@ test "coerce between pointers of compatible differently-named floats" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.os.tag == .windows) {
+ // https://github.com/ziglang/zig/issues/12396
+ return error.SkipZigTest;
+ }
+
const F = switch (@typeInfo(c_longdouble).Float.bits) {
16 => f16,
32 => f32,
diff --git a/test/behavior/comptime_memory.zig b/test/behavior/comptime_memory.zig
index 1fc3f64cda..8fa5fc503e 100644
--- a/test/behavior/comptime_memory.zig
+++ b/test/behavior/comptime_memory.zig
@@ -82,7 +82,7 @@ test "type pun value and struct" {
}
fn bigToNativeEndian(comptime T: type, v: T) T {
- return if (endian == .Big) v else @byteSwap(T, v);
+ return if (endian == .Big) v else @byteSwap(v);
}
test "type pun endianness" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
diff --git a/test/behavior/empty_union.zig b/test/behavior/empty_union.zig
new file mode 100644
index 0000000000..051e464b72
--- /dev/null
+++ b/test/behavior/empty_union.zig
@@ -0,0 +1,54 @@
+const builtin = @import("builtin");
+const std = @import("std");
+const expect = std.testing.expect;
+
+test "switch on empty enum" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+
+ const E = enum {};
+ var e: E = undefined;
+ switch (e) {}
+}
+
+test "switch on empty enum with a specified tag type" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+
+ const E = enum(u8) {};
+ var e: E = undefined;
+ switch (e) {}
+}
+
+test "switch on empty auto numbered tagged union" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+
+ const U = union(enum(u8)) {};
+ var u: U = undefined;
+ switch (u) {}
+}
+
+test "switch on empty tagged union" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+
+ const E = enum {};
+ const U = union(E) {};
+ var u: U = undefined;
+ switch (u) {}
+}
+
+test "empty union" {
+ const U = union {};
+ try expect(@sizeOf(U) == 0);
+ try expect(@alignOf(U) == 0);
+}
+
+test "empty extern union" {
+ const U = extern union {};
+ try expect(@sizeOf(U) == 0);
+ try expect(@alignOf(U) == 1);
+}
diff --git a/test/behavior/enum.zig b/test/behavior/enum.zig
index 709d30af33..517414780b 100644
--- a/test/behavior/enum.zig
+++ b/test/behavior/enum.zig
@@ -1,6 +1,7 @@
const builtin = @import("builtin");
const std = @import("std");
const expect = std.testing.expect;
+const assert = std.debug.assert;
const mem = std.mem;
const Tag = std.meta.Tag;
@@ -1128,3 +1129,49 @@ test "tag name functions are unique" {
_ = a;
}
}
+
+test "size of enum with only one tag which has explicit integer tag type" {
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
+ const E = enum(u8) { nope = 10 };
+ const S0 = struct { e: E };
+ const S1 = extern struct { e: E };
+ //const U = union(E) { nope: void };
+ comptime assert(@sizeOf(E) == 1);
+ comptime assert(@sizeOf(S0) == 1);
+ comptime assert(@sizeOf(S1) == 1);
+ //comptime assert(@sizeOf(U) == 1);
+
+ var s1: S1 = undefined;
+ s1.e = .nope;
+ try expect(s1.e == .nope);
+ const ptr = @ptrCast(*u8, &s1);
+ try expect(ptr.* == 10);
+
+ var s0: S0 = undefined;
+ s0.e = .nope;
+ try expect(s0.e == .nope);
+}
+
+test "switch on an extern enum with negative value" {
+ // TODO x86, wasm backends fail because they assume that enum tag types are unsigned
+ if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (@import("builtin").zig_backend == .stage2_wasm) return error.SkipZigTest;
+
+ const Foo = enum(c_int) {
+ Bar = -1,
+ };
+
+ const v = Foo.Bar;
+
+ switch (v) {
+ Foo.Bar => return,
+ }
+}
+
+test "Non-exhaustive enum with nonstandard int size behaves correctly" {
+ const E = enum(u15) { _ };
+ try expect(@sizeOf(E) == @sizeOf(u15));
+}
diff --git a/test/behavior/error.zig b/test/behavior/error.zig
index 306dad5d9e..684b01a797 100644
--- a/test/behavior/error.zig
+++ b/test/behavior/error.zig
@@ -168,7 +168,7 @@ fn entryPtr() void {
fooPtr(ptr);
}
-fn foo2(f: fn () anyerror!void) void {
+fn foo2(comptime f: fn () anyerror!void) void {
const x = f();
x catch {
@panic("fail");
@@ -725,7 +725,7 @@ test "simple else prong allowed even when all errors handled" {
try expect(value == 255);
}
-test {
+test "pointer to error union payload" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
@@ -736,3 +736,79 @@ test {
const payload_ptr = &(err_union catch unreachable);
try expect(payload_ptr.* == 15);
}
+
+const NoReturn = struct {
+ var a: u32 = undefined;
+ fn someData() bool {
+ a -= 1;
+ return a == 0;
+ }
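+ // loop's payload type is noreturn, so returning an error is the only way it can ever finish.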
+ fn loop() !noreturn {
+ while (true) {
+ if (someData())
+ return error.GenericFailure;
+ }
+ }
+ fn testTry() anyerror {
+ try loop();
+ }
+ fn testCatch() anyerror {
+ loop() catch return error.OtherFailure;
+ @compileError("bad");
+ }
+};
+
+test "error union of noreturn used with if" {
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ NoReturn.a = 64;
+ if (NoReturn.loop()) {
+ @compileError("bad");
+ } else |err| {
+ try expect(err == error.GenericFailure);
+ }
+}
+
+test "error union of noreturn used with try" {
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ NoReturn.a = 64;
+ const err = NoReturn.testTry();
+ try expect(err == error.GenericFailure);
+}
+
+test "error union of noreturn used with catch" {
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ NoReturn.a = 64;
+ const err = NoReturn.testCatch();
+ try expect(err == error.OtherFailure);
+}
+
+test "alignment of wrapping an error union payload" {
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
+ const S = struct {
+ const I = extern struct { x: i128 };
+
+ fn foo() anyerror!I {
+ var i: I = .{ .x = 1234 };
+ return i;
+ }
+ };
+ try expect((S.foo() catch unreachable).x == 1234);
+}
diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig
index d2a75e18df..bc1c3628d7 100644
--- a/test/behavior/eval.zig
+++ b/test/behavior/eval.zig
@@ -1293,3 +1293,57 @@ test "mutate through pointer-like optional at comptime" {
try expect(payload_ptr.*.* == 16);
}
}
+
+test "repeated value is correctly expanded" {
+ const S = struct { x: [4]i8 = std.mem.zeroes([4]i8) };
+ const M = struct { x: [4]S = std.mem.zeroes([4]S) };
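+ // std.mem.zeroes yields a repeated-element value; writing one element at comptime must expand it rather than change every element.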
+
+ comptime {
+ var res = M{};
+ for (.{ 1, 2, 3 }) |i| res.x[i].x[i] = i;
+
+ try expectEqual(M{ .x = .{
+ .{ .x = .{ 0, 0, 0, 0 } },
+ .{ .x = .{ 0, 1, 0, 0 } },
+ .{ .x = .{ 0, 0, 2, 0 } },
+ .{ .x = .{ 0, 0, 0, 3 } },
+ } }, res);
+ }
+}
+
+test "value in if block is comptime known" {
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
+
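+ // Both if conditions below are comptime known, so the selected results can be concatenated with ++.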
+ const first = blk: {
+ const s = if (false) "a" else "b";
+ break :blk "foo" ++ s;
+ };
+ const second = blk: {
+ const S = struct { str: []const u8 };
+ const s = if (false) S{ .str = "a" } else S{ .str = "b" };
+ break :blk "foo" ++ s.str;
+ };
+ comptime try expect(std.mem.eql(u8, first, second));
+}
+
+test "lazy sizeof is resolved in division" {
+ const A = struct {
+ a: u32,
+ };
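+ // @sizeOf(A) is a lazy value (4 once resolved); the division and subtraction below force it to resolve.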
+ const a = 2;
+ try expect(@sizeOf(A) / a == 2);
+ try expect(@sizeOf(A) - a == 2);
+}
+
+test "lazy value is resolved as slice operand" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+
+ const A = struct { a: u32 };
+ var a: [512]u64 = undefined;
+
+ const ptr1 = a[0..@sizeOf(A)];
+ const ptr2 = @ptrCast([*]u8, &a)[0..@sizeOf(A)];
+ try expect(@ptrToInt(ptr1) == @ptrToInt(ptr2));
+ try expect(ptr1.len == ptr2.len);
+}
diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig
index c057f7a842..a5eb25d4f5 100644
--- a/test/behavior/floatop.zig
+++ b/test/behavior/floatop.zig
@@ -194,8 +194,8 @@ fn testSin() !void {
const eps = epsForType(ty);
try expect(@sin(@as(ty, 0)) == 0);
try expect(math.approxEqAbs(ty, @sin(@as(ty, std.math.pi)), 0, eps));
- try expect(math.approxEqAbs(ty, @sin(@as(ty, std.math.pi / 2)), 1, eps));
- try expect(math.approxEqAbs(ty, @sin(@as(ty, std.math.pi / 4)), 0.7071067811865475, eps));
+ try expect(math.approxEqAbs(ty, @sin(@as(ty, std.math.pi / 2.0)), 1, eps));
+ try expect(math.approxEqAbs(ty, @sin(@as(ty, std.math.pi / 4.0)), 0.7071067811865475, eps));
}
{
@@ -228,8 +228,8 @@ fn testCos() !void {
const eps = epsForType(ty);
try expect(@cos(@as(ty, 0)) == 1);
try expect(math.approxEqAbs(ty, @cos(@as(ty, std.math.pi)), -1, eps));
- try expect(math.approxEqAbs(ty, @cos(@as(ty, std.math.pi / 2)), 0, eps));
- try expect(math.approxEqAbs(ty, @cos(@as(ty, std.math.pi / 4)), 0.7071067811865475, eps));
+ try expect(math.approxEqAbs(ty, @cos(@as(ty, std.math.pi / 2.0)), 0, eps));
+ try expect(math.approxEqAbs(ty, @cos(@as(ty, std.math.pi / 4.0)), 0.7071067811865475, eps));
}
{
diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig
index 044e4ff049..47ba63c429 100644
--- a/test/behavior/fn.zig
+++ b/test/behavior/fn.zig
@@ -137,7 +137,7 @@ test "implicit cast function unreachable return" {
wantsFnWithVoid(fnWithUnreachable);
}
-fn wantsFnWithVoid(f: fn () void) void {
+fn wantsFnWithVoid(comptime f: fn () void) void {
_ = f;
}
@@ -422,3 +422,24 @@ test "import passed byref to function in return type" {
var list = S.get();
try expect(list.items.len == 0);
}
+
+test "implicit cast function to function ptr" {
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ const S1 = struct {
+ export fn someFunctionThatReturnsAValue() c_int {
+ return 123;
+ }
+ };
+ var fnPtr1: *const fn () callconv(.C) c_int = S1.someFunctionThatReturnsAValue;
+ try expect(fnPtr1() == 123);
+ const S2 = struct {
+ extern fn someFunctionThatReturnsAValue() c_int;
+ };
+ var fnPtr2: *const fn () callconv(.C) c_int = S2.someFunctionThatReturnsAValue;
+ try expect(fnPtr2() == 123);
+}
diff --git a/test/behavior/generics.zig b/test/behavior/generics.zig
index cd4e018be3..ba4bca0c1a 100644
--- a/test/behavior/generics.zig
+++ b/test/behavior/generics.zig
@@ -323,3 +323,37 @@ test "generic function instantiation non-duplicates" {
S.copy(u8, &buffer, "hello");
S.copy(u8, &buffer, "hello2");
}
+
+test "generic instantiation of tagged union with only one field" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.os.tag == .wasi) return error.SkipZigTest;
+
+ const S = struct {
+ const U = union(enum) {
+ s: []const u8,
+ };
+
+ fn foo(comptime u: U) usize {
+ return u.s.len;
+ }
+ };
+
+ try expect(S.foo(.{ .s = "a" }) == 1);
+ try expect(S.foo(.{ .s = "ab" }) == 2);
+}
+
+test "nested generic function" {
+ const S = struct {
+ fn foo(comptime T: type, callback: *const fn (user_data: T) anyerror!void, data: T) anyerror!void {
+ try callback(data);
+ }
+ fn bar(a: u32) anyerror!void {
+ try expect(a == 123);
+ }
+
+ fn g(_: *const fn (anytype) void) void {}
+ };
+ try expect(@typeInfo(@TypeOf(S.g)).Fn.is_generic);
+ try S.foo(u32, S.bar, 123);
+}
diff --git a/test/behavior/math.zig b/test/behavior/math.zig
index c8d0becbd6..860cbe7042 100644
--- a/test/behavior/math.zig
+++ b/test/behavior/math.zig
@@ -90,10 +90,11 @@ fn testClzBigInts() !void {
}
fn testOneClz(comptime T: type, x: T) u32 {
- return @clz(T, x);
+ return @clz(x);
}
test "@clz vectors" {
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -120,7 +121,7 @@ fn testOneClzVector(
x: @Vector(len, T),
expected: @Vector(len, u32),
) !void {
- try expectVectorsEqual(@clz(T, x), expected);
+ try expectVectorsEqual(@clz(x), expected);
}
fn expectVectorsEqual(a: anytype, b: anytype) !void {
@@ -151,19 +152,18 @@ fn testCtz() !void {
}
fn testOneCtz(comptime T: type, x: T) u32 {
- return @ctz(T, x);
+ return @ctz(x);
}
test "@ctz vectors" {
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- if ((builtin.zig_backend == .stage1 or builtin.zig_backend == .stage2_llvm) and
- builtin.cpu.arch == .aarch64)
- {
+ if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
// This regressed with LLVM 14:
// https://github.com/ziglang/zig/issues/12013
return error.SkipZigTest;
@@ -187,7 +187,7 @@ fn testOneCtzVector(
x: @Vector(len, T),
expected: @Vector(len, u32),
) !void {
- try expectVectorsEqual(@ctz(T, x), expected);
+ try expectVectorsEqual(@ctz(x), expected);
}
test "const number literal" {
@@ -239,10 +239,9 @@ test "quad hex float literal parsing in range" {
}
test "underscore separator parsing" {
- try expect(0_0_0_0 == 0);
try expect(1_234_567 == 1234567);
- try expect(001_234_567 == 1234567);
- try expect(0_0_1_2_3_4_5_6_7 == 1234567);
+ try expect(1_234_567 == 1234567);
+ try expect(1_2_3_4_5_6_7 == 1234567);
try expect(0b0_0_0_0 == 0);
try expect(0b1010_1010 == 0b10101010);
@@ -260,7 +259,7 @@ test "underscore separator parsing" {
try expect(0x1_0_1_0_1_0_1_0 == 0x10101010);
try expect(123_456.789_000e1_0 == 123456.789000e10);
- try expect(0_1_2_3_4_5_6.7_8_9_0_0_0e0_0_1_0 == 123456.789000e10);
+ try expect(1_2_3_4_5_6.7_8_9_0_0_0e0_0_1_0 == 123456.789000e10);
try expect(0x1234_5678.9ABC_DEF0p-1_0 == 0x12345678.9ABCDEF0p-10);
try expect(0x1_2_3_4_5_6_7_8.9_A_B_C_D_E_F_0p-0_0_0_1_0 == 0x12345678.9ABCDEF0p-10);
@@ -1168,6 +1167,7 @@ test "remainder division" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/12602
comptime try remdiv(f16);
comptime try remdiv(f32);
@@ -1199,6 +1199,7 @@ test "float remainder division using @rem" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/12602
comptime try frem(f16);
comptime try frem(f32);
@@ -1241,6 +1242,7 @@ test "float modulo division using @mod" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/12602
comptime try fmod(f16);
comptime try fmod(f32);
@@ -1368,6 +1370,7 @@ test "@floor f80" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/12602
try testFloor(f80, 12.0);
comptime try testFloor(f80, 12.0);
@@ -1416,6 +1419,7 @@ test "@ceil f80" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/12602
try testCeil(f80, 12.0);
comptime try testCeil(f80, 12.0);
@@ -1464,6 +1468,7 @@ test "@trunc f80" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/12602
try testTrunc(f80, 12.0);
comptime try testTrunc(f80, 12.0);
@@ -1526,6 +1531,7 @@ test "@round f80" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/12602
try testRound(f80, 12.0);
comptime try testRound(f80, 12.0);
@@ -1721,3 +1727,18 @@ fn testAbsFloat() !void {
fn testAbsFloatOne(in: f32, out: f32) !void {
try expect(@fabs(@as(f32, in)) == @as(f32, out));
}
+
+test "mod lazy values" {
+ {
+ const X = struct { x: u32 };
+ const x = @sizeOf(X);
+ const y = 1 % x;
+ _ = y;
+ }
+ {
+ const X = struct { x: u32 };
+ const x = @sizeOf(X);
+ const y = x % 1;
+ _ = y;
+ }
+}
diff --git a/test/behavior/muladd.zig b/test/behavior/muladd.zig
index 01ef8c7d29..861b786a56 100644
--- a/test/behavior/muladd.zig
+++ b/test/behavior/muladd.zig
@@ -51,6 +51,7 @@ test "@mulAdd f80" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/12602
comptime try testMulAdd80();
try testMulAdd80();
@@ -182,6 +183,7 @@ test "vector f80" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/12602
comptime try vector80();
try vector80();
diff --git a/test/behavior/optional.zig b/test/behavior/optional.zig
index 4e5eb5061c..eb693147e6 100644
--- a/test/behavior/optional.zig
+++ b/test/behavior/optional.zig
@@ -369,3 +369,62 @@ test "optional pointer to zero bit error union payload" {
some.foo();
} else |_| {}
}
+
+const NoReturn = struct {
+ var a: u32 = undefined;
+ fn someData() bool {
+ a -= 1;
+ return a == 0;
+ }
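+ // With payload type noreturn, null is the only value loop can ever produce.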
+ fn loop() ?noreturn {
+ while (true) {
+ if (someData()) return null;
+ }
+ }
+ fn testOrelse() u32 {
+ loop() orelse return 123;
+ @compileError("bad");
+ }
+};
+
+test "optional of noreturn used with if" {
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
+
+ NoReturn.a = 64;
+ if (NoReturn.loop()) |_| {
+ @compileError("bad");
+ } else {
+ try expect(true);
+ }
+}
+
+test "optional of noreturn used with orelse" {
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
+
+ NoReturn.a = 64;
+ const val = NoReturn.testOrelse();
+ try expect(val == 123);
+}
+
+test "orelse on C pointer" {
+ // TODO https://github.com/ziglang/zig/issues/6597
+ const foo: [*c]const u8 = "hey";
+ const d = foo orelse @compileError("bad");
+ try expectEqual([*c]const u8, @TypeOf(d));
+}
+
+test "alignment of wrapping an optional payload" {
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
+ const S = struct {
+ const I = extern struct { x: i128 };
+
+ fn foo() ?I {
+ var i: I = .{ .x = 1234 };
+ return i;
+ }
+ };
+ try expect(S.foo().?.x == 1234);
+}
diff --git a/test/behavior/packed-struct.zig b/test/behavior/packed-struct.zig
index 8c34f5741b..bd312e9cda 100644
--- a/test/behavior/packed-struct.zig
+++ b/test/behavior/packed-struct.zig
@@ -434,3 +434,148 @@ test "@ptrToInt on a packed struct field" {
};
try expect(@ptrToInt(&S.p0.z) - @ptrToInt(&S.p0.x) == 2);
}
+
+test "optional pointer in packed struct" {
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
+
+ const T = packed struct { ptr: ?*const u8 };
+ var n: u8 = 0;
+ const x = T{ .ptr = &n };
+ try expect(x.ptr.? == &n);
+}
+
+test "nested packed struct field access test" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ const Vec2 = packed struct {
+ x: f32,
+ y: f32,
+ };
+
+ const Vec3 = packed struct {
+ x: f32,
+ y: f32,
+ z: f32,
+ };
+
+ const NestedVec2 = packed struct {
+ nested: Vec2,
+ };
+
+ const NestedVec3 = packed struct {
+ nested: Vec3,
+ };
+
+ const vec2 = Vec2{
+ .x = 1.0,
+ .y = 2.0,
+ };
+
+ try std.testing.expectEqual(vec2.x, 1.0);
+ try std.testing.expectEqual(vec2.y, 2.0);
+
+ var vec2_o: Vec2 = undefined;
+ const vec2_o_ptr: *Vec2 = &vec2_o;
+ vec2_o_ptr.* = vec2;
+
+ try std.testing.expectEqual(vec2_o.x, 1.0);
+ try std.testing.expectEqual(vec2_o.y, 2.0);
+
+ const nested_vec2 = NestedVec2{
+ .nested = Vec2{
+ .x = 1.0,
+ .y = 2.0,
+ },
+ };
+
+ try std.testing.expectEqual(nested_vec2.nested.x, 1.0);
+ try std.testing.expectEqual(nested_vec2.nested.y, 2.0);
+
+ var nested_o: NestedVec2 = undefined;
+ const nested_o_ptr: *NestedVec2 = &nested_o;
+ nested_o_ptr.* = nested_vec2;
+
+ try std.testing.expectEqual(nested_o.nested.x, 1.0);
+ try std.testing.expectEqual(nested_o.nested.y, 2.0);
+
+ const vec3 = Vec3{
+ .x = 1.0,
+ .y = 2.0,
+ .z = 3.0,
+ };
+
+ try std.testing.expectEqual(vec3.x, 1.0);
+ try std.testing.expectEqual(vec3.y, 2.0);
+ try std.testing.expectEqual(vec3.z, 3.0);
+
+ var vec3_o: Vec3 = undefined;
+ const vec3_o_ptr: *Vec3 = &vec3_o;
+ vec3_o_ptr.* = vec3;
+
+ try std.testing.expectEqual(vec3_o.x, 1.0);
+ try std.testing.expectEqual(vec3_o.y, 2.0);
+ try std.testing.expectEqual(vec3_o.z, 3.0);
+
+ const nested_vec3 = NestedVec3{
+ .nested = Vec3{
+ .x = 1.0,
+ .y = 2.0,
+ .z = 3.0,
+ },
+ };
+
+ try std.testing.expectEqual(nested_vec3.nested.x, 1.0);
+ try std.testing.expectEqual(nested_vec3.nested.y, 2.0);
+ try std.testing.expectEqual(nested_vec3.nested.z, 3.0);
+
+ var nested_vec3_o: NestedVec3 = undefined;
+ const nested_vec3_o_ptr: *NestedVec3 = &nested_vec3_o;
+ nested_vec3_o_ptr.* = nested_vec3;
+
+ try std.testing.expectEqual(nested_vec3_o.nested.x, 1.0);
+ try std.testing.expectEqual(nested_vec3_o.nested.y, 2.0);
+ try std.testing.expectEqual(nested_vec3_o.nested.z, 3.0);
+
+ const hld = packed struct {
+ c: u64,
+ d: u32,
+ };
+
+ const mld = packed struct {
+ h: u64,
+ i: u64,
+ };
+
+ const a = packed struct {
+ b: hld,
+ g: mld,
+ };
+
+ var arg = a{ .b = hld{ .c = 1, .d = 2 }, .g = mld{ .h = 6, .i = 8 } };
+ try std.testing.expect(arg.b.c == 1);
+ try std.testing.expect(arg.b.d == 2);
+ try std.testing.expect(arg.g.h == 6);
+ try std.testing.expect(arg.g.i == 8);
+}
+
+test "runtime init of unnamed packed struct type" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
+ var z: u8 = 123;
+ try (packed struct {
+ x: u8,
+ pub fn m(s: @This()) !void {
+ try expect(s.x == 123);
+ }
+ }{ .x = z }).m();
+}
diff --git a/test/behavior/packed_struct_explicit_backing_int.zig b/test/behavior/packed_struct_explicit_backing_int.zig
new file mode 100644
index 0000000000..165e94fd4e
--- /dev/null
+++ b/test/behavior/packed_struct_explicit_backing_int.zig
@@ -0,0 +1,53 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const assert = std.debug.assert;
+const expectEqual = std.testing.expectEqual;
+const native_endian = builtin.cpu.arch.endian();
+
+test "packed struct explicit backing integer" {
+ assert(builtin.zig_backend != .stage1);
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ const S1 = packed struct { a: u8, b: u8, c: u8 };
+
+ const S2 = packed struct(i24) { d: u8, e: u8, f: u8 };
+
+ const S3 = packed struct { x: S1, y: S2 };
+ const S3Padded = packed struct(u64) { s3: S3, pad: u16 };
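+ // S3 is 48 bits wide; padding it to u64 makes the @bitCast from an integer below well-defined.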
+
+ try expectEqual(48, @bitSizeOf(S3));
+ try expectEqual(@sizeOf(u48), @sizeOf(S3));
+
+ try expectEqual(3, @offsetOf(S3, "y"));
+ try expectEqual(24, @bitOffsetOf(S3, "y"));
+
+ if (native_endian == .Little) {
+ const s3 = @bitCast(S3Padded, @as(u64, 0xe952d5c71ff4)).s3;
+ try expectEqual(@as(u8, 0xf4), s3.x.a);
+ try expectEqual(@as(u8, 0x1f), s3.x.b);
+ try expectEqual(@as(u8, 0xc7), s3.x.c);
+ try expectEqual(@as(u8, 0xd5), s3.y.d);
+ try expectEqual(@as(u8, 0x52), s3.y.e);
+ try expectEqual(@as(u8, 0xe9), s3.y.f);
+ }
+
+ const S4 = packed struct { a: i32, b: i8 };
+ const S5 = packed struct(u80) { a: i32, b: i8, c: S4 };
+ const S6 = packed struct(i80) { a: i32, b: S4, c: i8 };
+
+ const expected_bit_size = 80;
+ const expected_byte_size = @sizeOf(u80);
+ try expectEqual(expected_bit_size, @bitSizeOf(S5));
+ try expectEqual(expected_byte_size, @sizeOf(S5));
+ try expectEqual(expected_bit_size, @bitSizeOf(S6));
+ try expectEqual(expected_byte_size, @sizeOf(S6));
+
+ try expectEqual(5, @offsetOf(S5, "c"));
+ try expectEqual(40, @bitOffsetOf(S5, "c"));
+ try expectEqual(9, @offsetOf(S6, "c"));
+ try expectEqual(72, @bitOffsetOf(S6, "c"));
+}
diff --git a/test/behavior/popcount.zig b/test/behavior/popcount.zig
index dbfeb64111..e061814692 100644
--- a/test/behavior/popcount.zig
+++ b/test/behavior/popcount.zig
@@ -18,53 +18,54 @@ test "@popCount 128bit integer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
comptime {
- try expect(@popCount(u128, @as(u128, 0b11111111000110001100010000100001000011000011100101010001)) == 24);
- try expect(@popCount(i128, @as(i128, 0b11111111000110001100010000100001000011000011100101010001)) == 24);
+ try expect(@popCount(@as(u128, 0b11111111000110001100010000100001000011000011100101010001)) == 24);
+ try expect(@popCount(@as(i128, 0b11111111000110001100010000100001000011000011100101010001)) == 24);
}
{
var x: u128 = 0b11111111000110001100010000100001000011000011100101010001;
- try expect(@popCount(u128, x) == 24);
+ try expect(@popCount(x) == 24);
}
- try expect(@popCount(i128, @as(i128, 0b11111111000110001100010000100001000011000011100101010001)) == 24);
+ try expect(@popCount(@as(i128, 0b11111111000110001100010000100001000011000011100101010001)) == 24);
}
fn testPopCountIntegers() !void {
{
var x: u32 = 0xffffffff;
- try expect(@popCount(u32, x) == 32);
+ try expect(@popCount(x) == 32);
}
{
var x: u5 = 0x1f;
- try expect(@popCount(u5, x) == 5);
+ try expect(@popCount(x) == 5);
}
{
var x: u32 = 0xaa;
- try expect(@popCount(u32, x) == 4);
+ try expect(@popCount(x) == 4);
}
{
var x: u32 = 0xaaaaaaaa;
- try expect(@popCount(u32, x) == 16);
+ try expect(@popCount(x) == 16);
}
{
var x: u32 = 0xaaaaaaaa;
- try expect(@popCount(u32, x) == 16);
+ try expect(@popCount(x) == 16);
}
{
var x: i16 = -1;
- try expect(@popCount(i16, x) == 16);
+ try expect(@popCount(x) == 16);
}
{
var x: i8 = -120;
- try expect(@popCount(i8, x) == 2);
+ try expect(@popCount(x) == 2);
}
comptime {
- try expect(@popCount(u8, @bitCast(u8, @as(i8, -120))) == 2);
+ try expect(@popCount(@bitCast(u8, @as(i8, -120))) == 2);
}
}
test "@popCount vectors" {
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -79,13 +80,13 @@ fn testPopCountVectors() !void {
{
var x: @Vector(8, u32) = [1]u32{0xffffffff} ** 8;
const expected = [1]u6{32} ** 8;
- const result: [8]u6 = @popCount(u32, x);
+ const result: [8]u6 = @popCount(x);
try expect(std.mem.eql(u6, &expected, &result));
}
{
var x: @Vector(8, i16) = [1]i16{-1} ** 8;
const expected = [1]u5{16} ** 8;
- const result: [8]u5 = @popCount(i16, x);
+ const result: [8]u5 = @popCount(x);
try expect(std.mem.eql(u5, &expected, &result));
}
}
diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig
index 377cbb56f4..12c874f8ba 100644
--- a/test/behavior/struct.zig
+++ b/test/behavior/struct.zig
@@ -147,7 +147,7 @@ test "fn call of struct field" {
return 13;
}
- fn callStructField(foo: Foo) i32 {
+ fn callStructField(comptime foo: Foo) i32 {
return foo.ptr();
}
};
@@ -963,7 +963,7 @@ test "tuple assigned to variable" {
test "comptime struct field" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.stage2_arch == .arm) return error.SkipZigTest; // TODO
+ if (builtin.cpu.arch == .arm) return error.SkipZigTest; // TODO
const T = struct {
a: i32,
diff --git a/test/behavior/switch.zig b/test/behavior/switch.zig
index 4e86bcadeb..d218fb6bc6 100644
--- a/test/behavior/switch.zig
+++ b/test/behavior/switch.zig
@@ -531,6 +531,7 @@ test "switch with null and T peer types and inferred result location type" {
test "switch prongs with cases with identical payload types" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
const Union = union(enum) {
A: usize,
diff --git a/test/behavior/tuple.zig b/test/behavior/tuple.zig
index 14297bd61c..2b715c3b23 100644
--- a/test/behavior/tuple.zig
+++ b/test/behavior/tuple.zig
@@ -290,3 +290,41 @@ test "coerce tuple to tuple" {
};
try S.foo(.{123});
}
+
+test "tuple type with void field" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ const T = std.meta.Tuple(&[_]type{void});
+ const x = T{{}};
+ try expect(@TypeOf(x[0]) == void);
+}
+
+test "zero sized struct in tuple handled correctly" {
+ const State = struct {
+ const Self = @This();
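+ // Reify a one-field tuple type whose only field is a zero-sized struct.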
+ data: @Type(.{
+ .Struct = .{
+ .is_tuple = true,
+ .layout = .Auto,
+ .decls = &.{},
+ .fields = &.{.{
+ .name = "0",
+ .field_type = struct {},
+ .default_value = null,
+ .is_comptime = false,
+ .alignment = 0,
+ }},
+ },
+ }),
+
+ pub fn do(this: Self) usize {
+ return @sizeOf(@TypeOf(this));
+ }
+ };
+
+ var s: State = undefined;
+ try expect(s.do() == 0);
+}
diff --git a/test/behavior/type.zig b/test/behavior/type.zig
index 4aec553527..8cef86b5dd 100644
--- a/test/behavior/type.zig
+++ b/test/behavior/type.zig
@@ -513,6 +513,11 @@ test "Type.Fn" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (true) {
+ // https://github.com/ziglang/zig/issues/12360
+ return error.SkipZigTest;
+ }
+
const some_opaque = opaque {};
const some_ptr = *some_opaque;
const T = fn (c_int, some_ptr) callconv(.C) void;
diff --git a/test/behavior/type_info.zig b/test/behavior/type_info.zig
index b1012e69c8..968c3e7490 100644
--- a/test/behavior/type_info.zig
+++ b/test/behavior/type_info.zig
@@ -293,6 +293,7 @@ test "type info: struct info" {
fn testStruct() !void {
const unpacked_struct_info = @typeInfo(TestStruct);
try expect(unpacked_struct_info.Struct.is_tuple == false);
+ try expect(unpacked_struct_info.Struct.backing_integer == null);
try expect(unpacked_struct_info.Struct.fields[0].alignment == @alignOf(u32));
try expect(@ptrCast(*const u32, unpacked_struct_info.Struct.fields[0].default_value.?).* == 4);
try expect(mem.eql(u8, "foobar", @ptrCast(*const *const [6:0]u8, unpacked_struct_info.Struct.fields[1].default_value.?).*));
@@ -315,6 +316,7 @@ fn testPackedStruct() !void {
try expect(struct_info == .Struct);
try expect(struct_info.Struct.is_tuple == false);
try expect(struct_info.Struct.layout == .Packed);
+ try expect(struct_info.Struct.backing_integer == u128);
try expect(struct_info.Struct.fields.len == 4);
try expect(struct_info.Struct.fields[0].alignment == 0);
try expect(struct_info.Struct.fields[2].field_type == f32);
@@ -326,7 +328,7 @@ fn testPackedStruct() !void {
}
const TestPackedStruct = packed struct {
- fieldA: usize,
+ fieldA: u64,
fieldB: void,
fieldC: f32,
fieldD: u32 = 4,
diff --git a/test/behavior/typename.zig b/test/behavior/typename.zig
index e0cb5ba68c..3bc8c58389 100644
--- a/test/behavior/typename.zig
+++ b/test/behavior/typename.zig
@@ -235,3 +235,14 @@ test "local variable" {
try expectEqualStrings("behavior.typename.test.local variable.Qux", @typeName(Qux));
try expectEqualStrings("behavior.typename.test.local variable.Quux", @typeName(Quux));
}
+
+test "comptime parameters not converted to anytype in function type" {
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ const T = fn (fn (type) void, void) void;
+ try expectEqualStrings("fn(fn(type) void, void) void", @typeName(T));
+}
diff --git a/test/behavior/union.zig b/test/behavior/union.zig
index 2f6fa78f0c..5d6b084be5 100644
--- a/test/behavior/union.zig
+++ b/test/behavior/union.zig
@@ -1,6 +1,7 @@
const builtin = @import("builtin");
const std = @import("std");
const expect = std.testing.expect;
+const assert = std.debug.assert;
const expectEqual = std.testing.expectEqual;
const Tag = std.meta.Tag;
@@ -744,7 +745,7 @@ fn setAttribute(attr: Attribute) void {
_ = attr;
}
-fn Setter(attr: Attribute) type {
+fn Setter(comptime attr: Attribute) type {
return struct {
fn set() void {
setAttribute(attr);
@@ -1065,6 +1066,8 @@ test "@unionInit on union with tag but no fields" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
const S = struct {
const Type = enum(u8) { no_op = 105 };
@@ -1079,11 +1082,7 @@ test "@unionInit on union with tag but no fields" {
};
comptime {
- if (builtin.zig_backend == .stage1) {
- // stage1 gets the wrong answer here
- } else {
- std.debug.assert(@sizeOf(Data) == 0);
- }
+ assert(@sizeOf(Data) == 1);
}
fn doTheTest() !void {
@@ -1256,3 +1255,72 @@ test "return an extern union from C calling convention" {
});
try expect(u.d == 4.0);
}
+
+test "noreturn field in union" {
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+
+ const U = union(enum) {
+ a: u32,
+ b: noreturn,
+ c: noreturn,
+ };
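+ // The noreturn fields can never be active, so their switch prongs below are unreachable and the @compileError calls are never analyzed.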
+ var a = U{ .a = 1 };
+ var count: u32 = 0;
+ if (a == .b) @compileError("bad");
+ switch (a) {
+ .a => count += 1,
+ .b => |val| {
+ _ = val;
+ @compileError("bad");
+ },
+ .c => @compileError("bad"),
+ }
+ switch (a) {
+ .a => count += 1,
+ .b, .c => @compileError("bad"),
+ }
+ switch (a) {
+ .a, .b, .c => {
+ count += 1;
+ try expect(a == .a);
+ },
+ }
+ switch (a) {
+ .a => count += 1,
+ else => @compileError("bad"),
+ }
+ switch (a) {
+ else => {
+ count += 1;
+ try expect(a == .a);
+ },
+ }
+ try expect(count == 5);
+}
+
+test "union and enum field order doesn't match" {
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ const MyTag = enum(u32) {
+ b = 1337,
+ a = 1666,
+ };
+ const MyUnion = union(MyTag) {
+ a: f32,
+ b: void,
+ };
+ var x: MyUnion = .{ .a = 666 };
+ switch (x) {
+ .a => |my_f32| {
+ try expect(@TypeOf(my_f32) == f32);
+ },
+ .b => unreachable,
+ }
+ x = .b;
+ try expect(x == .b);
+}
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index c13db2e262..829d20057a 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -807,6 +807,23 @@ test "vector reduce operation" {
comptime try S.doTheTest();
}
+test "vector @reduce comptime" {
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ const value = @Vector(4, i32){ 1, -1, 1, -1 };
+ const result = value > @splat(4, @as(i32, 0));
+ // result is { true, false, true, false }
+ comptime try expect(@TypeOf(result) == @Vector(4, bool));
+ const is_all_true = @reduce(.And, result);
+ comptime try expect(@TypeOf(is_all_true) == bool);
+ try expect(is_all_true == false);
+}
+
test "mask parameter of @shuffle is comptime scope" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
@@ -1094,3 +1111,26 @@ test "loading the second vector from a slice of vectors" {
var a4 = a[1][1];
try expect(a4 == 3);
}
+
+test "array of vectors is copied" {
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ const Vec3 = @Vector(3, i32);
+ var points = [_]Vec3{
+ Vec3{ 404, -588, -901 },
+ Vec3{ 528, -643, 409 },
+ Vec3{ -838, 591, 734 },
+ Vec3{ 390, -675, -793 },
+ Vec3{ -537, -823, -458 },
+ Vec3{ -485, -357, 347 },
+ Vec3{ -345, -311, 381 },
+ Vec3{ -661, -816, -575 },
+ };
+ var points2: [20]Vec3 = undefined;
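+ // Dereferencing the sub-array pointer copies all eight vectors in a single assignment.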
+ points2[0..points.len].* = points;
+ try std.testing.expectEqual(points2[6], Vec3{ -345, -311, 381 });
+}
diff --git a/test/behavior/void.zig b/test/behavior/void.zig
index 66b969d0bf..4cce1cf71b 100644
--- a/test/behavior/void.zig
+++ b/test/behavior/void.zig
@@ -45,3 +45,9 @@ test "void array as a local variable initializer" {
var x = [_]void{{}} ** 1004;
_ = x[0];
}
+
+const void_constant = {};
+test "reference to void constants" {
+ var a = void_constant;
+ _ = a;
+}
diff --git a/test/cases/compile_errors/AstGen_comptime_known_struct_is_resolved_before_error.zig b/test/cases/compile_errors/AstGen_comptime_known_struct_is_resolved_before_error.zig
new file mode 100644
index 0000000000..8e9358c6f4
--- /dev/null
+++ b/test/cases/compile_errors/AstGen_comptime_known_struct_is_resolved_before_error.zig
@@ -0,0 +1,19 @@
+const S1 = struct {
+ a: S2,
+};
+const S2 = struct {
+ b: fn () void,
+};
+pub export fn entry() void {
+ var s: S1 = undefined;
+ _ = s;
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :8:12: error: variable of type 'tmp.S1' must be const or comptime
+// :2:8: note: struct requires comptime because of this field
+// :5:8: note: struct requires comptime because of this field
+// :5:8: note: use '*const fn() void' for a function pointer type
diff --git a/test/cases/compile_errors/access_inactive_union_field_comptime.zig b/test/cases/compile_errors/access_inactive_union_field_comptime.zig
new file mode 100644
index 0000000000..d990a85f9e
--- /dev/null
+++ b/test/cases/compile_errors/access_inactive_union_field_comptime.zig
@@ -0,0 +1,23 @@
+const Enum = enum(u32) { a, b };
+const TaggedUnion = union(Enum) {
+ b: []const u8,
+ a: []const u8,
+};
+pub export fn entry() void {
+ const result = TaggedUnion{ .b = "b" };
+ _ = result.b;
+ _ = result.a;
+}
+pub export fn entry1() void {
+ const result = TaggedUnion{ .b = "b" };
+ _ = &result.b;
+ _ = &result.a;
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :9:15: error: access of union field 'a' while field 'b' is active
+// :2:21: note: union declared here
+// :14:16: error: access of union field 'a' while field 'b' is active
diff --git a/test/cases/compile_errors/ambiguous_coercion_of_division_operands.zig b/test/cases/compile_errors/ambiguous_coercion_of_division_operands.zig
new file mode 100644
index 0000000000..f3e51a1bed
--- /dev/null
+++ b/test/cases/compile_errors/ambiguous_coercion_of_division_operands.zig
@@ -0,0 +1,23 @@
+export fn entry1() void {
+ var f: f32 = 54.0 / 5;
+ _ = f;
+}
+export fn entry2() void {
+ var f: f32 = 54 / 5.0;
+ _ = f;
+}
+export fn entry3() void {
+ var f: f32 = 55.0 / 5;
+ _ = f;
+}
+export fn entry4() void {
+ var f: f32 = 55 / 5.0;
+ _ = f;
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :2:23: error: ambiguous coercion of division operands 'comptime_float' and 'comptime_int'; non-zero remainder '4'
+// :6:21: error: ambiguous coercion of division operands 'comptime_int' and 'comptime_float'; non-zero remainder '4'
diff --git a/test/cases/compile_errors/bogus_method_call_on_slice.zig b/test/cases/compile_errors/bogus_method_call_on_slice.zig
index b5cb5e472a..ed18f43f48 100644
--- a/test/cases/compile_errors/bogus_method_call_on_slice.zig
+++ b/test/cases/compile_errors/bogus_method_call_on_slice.zig
@@ -3,9 +3,17 @@ fn f(m: []const u8) void {
m.copy(u8, self[0..], m);
}
export fn entry() usize { return @sizeOf(@TypeOf(&f)); }
+pub export fn entry1() void {
+ .{}.bar();
+}
+pub export fn entry2() void {
+ .{ .foo = 1 }.bar();
+}
// error
// backend=stage2
// target=native
//
+// :7:8: error: no field or member function named 'bar' in '@TypeOf(.{})'
+// :10:18: error: no field or member function named 'bar' in 'struct{comptime foo: comptime_int = 1}'
// :3:6: error: no field or member function named 'copy' in '[]const u8'
diff --git a/test/cases/compile_errors/calling_function_with_naked_calling_convention.zig b/test/cases/compile_errors/calling_function_with_naked_calling_convention.zig
new file mode 100644
index 0000000000..54bf585425
--- /dev/null
+++ b/test/cases/compile_errors/calling_function_with_naked_calling_convention.zig
@@ -0,0 +1,11 @@
+export fn entry() void {
+ foo();
+}
+fn foo() callconv(.Naked) void {}
+
+// error
+// backend=llvm
+// target=native
+//
+// :2:5: error: unable to call function with naked calling convention
+// :4:1: note: function declared here
diff --git a/test/cases/compile_errors/compile_time_null_ptr_cast.zig b/test/cases/compile_errors/compile_time_null_ptr_cast.zig
new file mode 100644
index 0000000000..d3750c8654
--- /dev/null
+++ b/test/cases/compile_errors/compile_time_null_ptr_cast.zig
@@ -0,0 +1,11 @@
+comptime {
+ var opt_ptr: ?*i32 = null;
+ const ptr = @ptrCast(*i32, opt_ptr);
+ _ = ptr;
+}
+
+// error
+// backend=llvm
+// target=native
+//
+// :3:32: error: null pointer casted to type *i32
diff --git a/test/cases/compile_errors/compile_time_undef_ptr_cast.zig b/test/cases/compile_errors/compile_time_undef_ptr_cast.zig
new file mode 100644
index 0000000000..14edd293de
--- /dev/null
+++ b/test/cases/compile_errors/compile_time_undef_ptr_cast.zig
@@ -0,0 +1,11 @@
+comptime {
+ var undef_ptr: *i32 = undefined;
+ const ptr = @ptrCast(*i32, undef_ptr);
+ _ = ptr;
+}
+
+// error
+// backend=llvm
+// target=native
+//
+// :3:32: error: use of undefined value here causes undefined behavior
diff --git a/test/cases/compile_errors/comptime_parameter_not_declared_as_such.zig b/test/cases/compile_errors/comptime_parameter_not_declared_as_such.zig
new file mode 100644
index 0000000000..008d14f2fc
--- /dev/null
+++ b/test/cases/compile_errors/comptime_parameter_not_declared_as_such.zig
@@ -0,0 +1,24 @@
+fn f(_: anytype) void {}
+const T = *const fn (anytype) void;
+fn g(h: T) void {
+ h({});
+}
+pub export fn entry() void {
+ g(f);
+}
+
+pub fn comptimeMod(num: anytype, denom: comptime_int) void {
+ _ = num;
+ _ = denom;
+}
+
+pub export fn entry1() void {
+ _ = comptimeMod(1, 2);
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :3:6: error: parameter of type '*const fn(anytype) void' must be declared comptime
+// :10:34: error: parameter of type 'comptime_int' must be declared comptime
diff --git a/test/cases/compile_errors/decl_shadows_local.zig b/test/cases/compile_errors/decl_shadows_local.zig
new file mode 100644
index 0000000000..cb48cafa45
--- /dev/null
+++ b/test/cases/compile_errors/decl_shadows_local.zig
@@ -0,0 +1,22 @@
+fn foo(a: usize) void {
+ struct {
+ const a = 1;
+ };
+}
+fn bar(a: usize) void {
+ struct {
+ const b = struct {
+ const a = 1;
+ };
+ };
+ _ = a;
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :3:15: error: redeclaration of function parameter 'a'
+// :1:8: note: previous declaration here
+// :9:19: error: redeclaration of function parameter 'a'
+// :6:8: note: previous declaration here
diff --git a/test/cases/compile_errors/duplicate_field_in_discarded_anon_init.zig b/test/cases/compile_errors/duplicate_field_in_discarded_anon_init.zig
new file mode 100644
index 0000000000..6f850e6fe4
--- /dev/null
+++ b/test/cases/compile_errors/duplicate_field_in_discarded_anon_init.zig
@@ -0,0 +1,10 @@
+pub export fn entry() void {
+ _ = .{ .a = 0, .a = 1 };
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :2:21: error: duplicate field
+// :2:13: note: other field here
diff --git a/test/cases/compile_errors/enum_with_0_fields.zig b/test/cases/compile_errors/enum_with_0_fields.zig
deleted file mode 100644
index f34065b69d..0000000000
--- a/test/cases/compile_errors/enum_with_0_fields.zig
+++ /dev/null
@@ -1,7 +0,0 @@
-const Foo = enum {};
-
-// error
-// backend=stage2
-// target=native
-//
-// :1:13: error: enum declarations must have at least one tag
diff --git a/test/cases/compile_errors/error_in_typeof_param.zig b/test/cases/compile_errors/error_in_typeof_param.zig
new file mode 100644
index 0000000000..747cdf3df6
--- /dev/null
+++ b/test/cases/compile_errors/error_in_typeof_param.zig
@@ -0,0 +1,14 @@
+fn getSize() usize {
+ return 2;
+}
+pub fn expectEqual(expected: anytype, _: @TypeOf(expected)) !void {}
+pub export fn entry() void {
+ try expectEqual(2, getSize());
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :6:31: error: unable to resolve comptime value
+// :6:31: note: argument to parameter with comptime only type must be comptime known
diff --git a/test/cases/compile_errors/errors_in_for_loop_bodies_are_propagated.zig b/test/cases/compile_errors/errors_in_for_loop_bodies_are_propagated.zig
index bc30b5cbe6..016dfb1412 100644
--- a/test/cases/compile_errors/errors_in_for_loop_bodies_are_propagated.zig
+++ b/test/cases/compile_errors/errors_in_for_loop_bodies_are_propagated.zig
@@ -1,10 +1,10 @@
pub export fn entry() void {
var arr: [100]u8 = undefined;
- for (arr) |bits| _ = @popCount(bits);
+ for (arr) |bits| _ = @popCount(u8, bits);
}
// error
// backend=stage2
// target=native
//
-// :3:26: error: expected 2 arguments, found 1
+// :3:26: error: expected 1 argument, found 2
diff --git a/test/cases/compile_errors/exact division failure.zig b/test/cases/compile_errors/exact division failure.zig
new file mode 100644
index 0000000000..d134e0e2fb
--- /dev/null
+++ b/test/cases/compile_errors/exact division failure.zig
@@ -0,0 +1,10 @@
+comptime {
+ const x = @divExact(10, 3);
+ _ = x;
+}
+
+// error
+// backend=llvm
+// target=native
+//
+// :2:15: error: exact division produced remainder
diff --git a/test/cases/compile_errors/explain_why_fn_is_called_at_comptime.zig b/test/cases/compile_errors/explain_why_fn_is_called_at_comptime.zig
new file mode 100644
index 0000000000..04f64c2303
--- /dev/null
+++ b/test/cases/compile_errors/explain_why_fn_is_called_at_comptime.zig
@@ -0,0 +1,23 @@
+const S = struct {
+ fnPtr: fn () void,
+ a: u8,
+};
+fn bar() void {}
+
+fn foo(comptime a: *u8) S {
+ return .{ .fnPtr = bar, .a = a.* };
+}
+pub export fn entry() void {
+ var a: u8 = 1;
+ _ = foo(&a);
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :12:13: error: unable to resolve comptime value
+// :12:13: note: argument to function being called at comptime must be comptime known
+// :7:25: note: function is being called at comptime because it returns a comptime only type 'tmp.S'
+// :2:12: note: struct requires comptime because of this field
+// :2:12: note: use '*const fn() void' for a function pointer type
diff --git a/test/cases/compile_errors/explain_why_generic_fn_is_called_at_comptime.zig b/test/cases/compile_errors/explain_why_generic_fn_is_called_at_comptime.zig
new file mode 100644
index 0000000000..ccd828bd5c
--- /dev/null
+++ b/test/cases/compile_errors/explain_why_generic_fn_is_called_at_comptime.zig
@@ -0,0 +1,22 @@
+fn S(comptime PtrTy: type) type {
+ return struct {
+ fnPtr: PtrTy,
+ a: u8,
+ };
+}
+fn bar() void {}
+
+fn foo(a: u8, comptime PtrTy: type) S(PtrTy) {
+ return .{ .fnPtr = bar, .a = a };
+}
+pub export fn entry() void {
+ var a: u8 = 1;
+ _ = foo(a, fn () void);
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :14:13: error: unable to resolve comptime value
+// :14:13: note: argument to function being called at comptime must be comptime known
+// :9:38: note: generic function is instantiated with a comptime only return type
diff --git a/test/cases/compile_errors/export_function_with_comptime_parameter.zig b/test/cases/compile_errors/export_function_with_comptime_parameter.zig
index 94c8f6de50..4491a98e9c 100644
--- a/test/cases/compile_errors/export_function_with_comptime_parameter.zig
+++ b/test/cases/compile_errors/export_function_with_comptime_parameter.zig
@@ -6,4 +6,4 @@ export fn foo(comptime x: anytype, y: i32) i32{
// backend=stage2
// target=native
//
-// :1:15: error: generic parameters not allowed in function with calling convention 'C'
+// :1:15: error: comptime parameters not allowed in function with calling convention 'C'
diff --git a/test/cases/compile_errors/extern_function_with_comptime_parameter.zig b/test/cases/compile_errors/extern_function_with_comptime_parameter.zig
index cac4c7e5e2..de69fa409f 100644
--- a/test/cases/compile_errors/extern_function_with_comptime_parameter.zig
+++ b/test/cases/compile_errors/extern_function_with_comptime_parameter.zig
@@ -12,9 +12,6 @@ comptime { _ = entry2; }
// backend=stage2
// target=native
//
-// :5:12: error: extern function cannot be generic
-// :5:30: note: function is generic because of this parameter
-// :6:12: error: extern function cannot be generic
-// :6:30: note: function is generic because of this parameter
-// :1:8: error: extern function cannot be generic
-// :1:15: note: function is generic because of this parameter
+// :1:15: error: comptime parameters not allowed in function with calling convention 'C'
+// :5:30: error: comptime parameters not allowed in function with calling convention 'C'
+// :6:30: error: generic parameters not allowed in function with calling convention 'C'
diff --git a/test/cases/compile_errors/float exact division failure.zig b/test/cases/compile_errors/float exact division failure.zig
new file mode 100644
index 0000000000..c09defc56e
--- /dev/null
+++ b/test/cases/compile_errors/float exact division failure.zig
@@ -0,0 +1,10 @@
+comptime {
+ const x = @divExact(10.0, 3.0);
+ _ = x;
+}
+
+// error
+// backend=llvm
+// target=native
+//
+// :2:15: error: exact division produced remainder
diff --git a/test/cases/compile_errors/function_type_named.zig b/test/cases/compile_errors/function_type_named.zig
new file mode 100644
index 0000000000..ad670fe88c
--- /dev/null
+++ b/test/cases/compile_errors/function_type_named.zig
@@ -0,0 +1,7 @@
+const aFunc = fn someFunc(x: i32) void;
+
+// error
+// backend=stage2
+// target=native
+//
+// :1:18: error: function type cannot have a name
diff --git a/test/cases/compile_errors/int_literal_passed_as_variadic_arg.zig b/test/cases/compile_errors/int_literal_passed_as_variadic_arg.zig
new file mode 100644
index 0000000000..be9ffaa884
--- /dev/null
+++ b/test/cases/compile_errors/int_literal_passed_as_variadic_arg.zig
@@ -0,0 +1,11 @@
+extern fn printf([*:0]const u8, ...) c_int;
+
+pub export fn entry() void {
+ _ = printf("%d %d %d %d\n", 1, 2, 3, 4);
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :4:33: error: integer and float literals in var args function must be casted
diff --git a/test/cases/compile_errors/invalid_error_union_payload_type.zig b/test/cases/compile_errors/invalid_error_union_payload_type.zig
new file mode 100644
index 0000000000..f8646d9450
--- /dev/null
+++ b/test/cases/compile_errors/invalid_error_union_payload_type.zig
@@ -0,0 +1,13 @@
+comptime {
+ _ = anyerror!anyopaque;
+}
+comptime {
+ _ = anyerror!anyerror;
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :2:18: error: error union with payload of opaque type 'anyopaque' not allowed
+// :5:18: error: error union with payload of error set type 'anyerror' not allowed
diff --git a/test/cases/compile_errors/invalid_optional_payload_type.zig b/test/cases/compile_errors/invalid_optional_payload_type.zig
new file mode 100644
index 0000000000..0058cd5e36
--- /dev/null
+++ b/test/cases/compile_errors/invalid_optional_payload_type.zig
@@ -0,0 +1,13 @@
+comptime {
+ _ = ?anyopaque;
+}
+comptime {
+ _ = ?@TypeOf(null);
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :2:10: error: opaque type 'anyopaque' cannot be optional
+// :5:10: error: type '@TypeOf(null)' cannot be optional
diff --git a/test/cases/compile_errors/invalid_underscore_placement_in_int_literal-1.zig b/test/cases/compile_errors/invalid_underscore_placement_in_int_literal-1.zig
index 868ea8d42a..b90c733dea 100644
--- a/test/cases/compile_errors/invalid_underscore_placement_in_int_literal-1.zig
+++ b/test/cases/compile_errors/invalid_underscore_placement_in_int_literal-1.zig
@@ -1,5 +1,5 @@
fn main() void {
- var bad: u128 = 0010_;
+ var bad: u128 = 10_;
_ = bad;
}
@@ -8,4 +8,4 @@ fn main() void {
// target=native
//
// :2:21: error: expected expression, found 'invalid bytes'
-// :2:26: note: invalid byte: ';'
+// :2:24: note: invalid byte: ';'
diff --git a/test/cases/compile_errors/leading_zero_in_integer.zig b/test/cases/compile_errors/leading_zero_in_integer.zig
new file mode 100644
index 0000000000..a818a3d75d
--- /dev/null
+++ b/test/cases/compile_errors/leading_zero_in_integer.zig
@@ -0,0 +1,27 @@
+export fn entry1() void {
+ const T = u000123;
+ _ = T;
+}
+export fn entry2() void {
+ _ = i0;
+ _ = u0;
+ var x: i01 = 1;
+ _ = x;
+}
+export fn entry3() void {
+ _ = 000123;
+}
+export fn entry4() void {
+ _ = 01;
+}
+
+// error
+// backend=llvm
+// target=native
+//
+// :2:15: error: primitive integer type 'u000123' has leading zero
+// :8:12: error: primitive integer type 'i01' has leading zero
+// :12:9: error: integer literal '000123' has leading zero
+// :12:9: note: use '0o' prefix for octal literals
+// :15:9: error: integer literal '01' has leading zero
+// :15:9: note: use '0o' prefix for octal literals
diff --git a/test/cases/compile_errors/member_function_arg_mismatch.zig b/test/cases/compile_errors/member_function_arg_mismatch.zig
new file mode 100644
index 0000000000..b739be9544
--- /dev/null
+++ b/test/cases/compile_errors/member_function_arg_mismatch.zig
@@ -0,0 +1,15 @@
+const S = struct {
+ a: u32,
+ fn foo(_: *S, _: u32, _: bool) void {}
+};
+pub export fn entry() void {
+ var s: S = undefined;
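+ // method call syntax passes &s as the first parameter automatically;
+ // foo still expects two more arguments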
+ s.foo(true);
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :7:6: error: member function expected 2 argument(s), found 1
+// :3:5: note: function declared here
diff --git a/test/cases/compile_errors/non_comptime_param_in_comptime_function.zig b/test/cases/compile_errors/non_comptime_param_in_comptime_function.zig
new file mode 100644
index 0000000000..758166dd7f
--- /dev/null
+++ b/test/cases/compile_errors/non_comptime_param_in_comptime_function.zig
@@ -0,0 +1,36 @@
+fn F(val: anytype) type {
+ _ = val;
+ return struct {};
+}
+export fn entry() void {
+ _ = F(void{});
+}
+const S = struct {
+ foo: fn () void,
+};
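+// S has a bare function field, so it is comptime-only; bar's return type
+// then requires its parameter to be comptime as well.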
+fn bar(_: u32) S {
+ return undefined;
+}
+export fn entry1() void {
+ _ = bar();
+}
+// the calling convention error takes priority over the comptime-only return type error
+fn foo(a: u32) callconv(.C) comptime_int {
+ return a;
+}
+export fn entry2() void {
+ _ = foo(1);
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :1:20: error: function with comptime only return type 'type' requires all parameters to be comptime
+// :1:20: note: types are not available at runtime
+// :1:6: note: param 'val' is required to be comptime
+// :11:16: error: function with comptime only return type 'tmp.S' requires all parameters to be comptime
+// :9:10: note: struct requires comptime because of this field
+// :9:10: note: use '*const fn() void' for a function pointer type
+// :11:8: note: param is required to be comptime
+// :18:29: error: return type 'comptime_int' not allowed in function with calling convention 'C'
diff --git a/test/cases/compile_errors/non_constant_expression_in_array_size.zig b/test/cases/compile_errors/non_constant_expression_in_array_size.zig
index e702246001..1dc8b50464 100644
--- a/test/cases/compile_errors/non_constant_expression_in_array_size.zig
+++ b/test/cases/compile_errors/non_constant_expression_in_array_size.zig
@@ -11,4 +11,4 @@ export fn entry() usize { return @offsetOf(Foo, "y"); }
// target=native
//
// :5:25: error: cannot load runtime value in comptime block
-// :2:15: note: called from here
+// :2:12: note: called from here
diff --git a/test/cases/compile_errors/noreturn_struct_field.zig b/test/cases/compile_errors/noreturn_struct_field.zig
new file mode 100644
index 0000000000..90b243c31d
--- /dev/null
+++ b/test/cases/compile_errors/noreturn_struct_field.zig
@@ -0,0 +1,12 @@
+const S = struct {
+ s: noreturn,
+};
+comptime {
+ _ = @typeInfo(S);
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :2:5: error: struct fields cannot be 'noreturn'
diff --git a/test/cases/compile_errors/not_an_enum_type.zig b/test/cases/compile_errors/not_an_enum_type.zig
index 063ee8a8d8..bdd03c8db7 100644
--- a/test/cases/compile_errors/not_an_enum_type.zig
+++ b/test/cases/compile_errors/not_an_enum_type.zig
@@ -17,4 +17,4 @@ const ExpectedVarDeclOrFn = struct {};
// target=native
//
// :4:9: error: expected type '@typeInfo(tmp.Error).Union.tag_type.?', found 'type'
-// :8:1: note: enum declared here
+// :8:15: note: enum declared here
diff --git a/test/cases/compile_errors/packed_struct_backing_int_wrong.zig b/test/cases/compile_errors/packed_struct_backing_int_wrong.zig
new file mode 100644
index 0000000000..cd1b4ec11c
--- /dev/null
+++ b/test/cases/compile_errors/packed_struct_backing_int_wrong.zig
@@ -0,0 +1,55 @@
+export fn entry1() void {
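+ // field bits sum to 1 + 24 + 4 = 29, not the 32 promised by the u32 backing integer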
+ _ = @sizeOf(packed struct(u32) {
+ x: u1,
+ y: u24,
+ z: u4,
+ });
+}
+export fn entry2() void {
+ _ = @sizeOf(packed struct(i31) {
+ x: u4,
+ y: u24,
+ z: u4,
+ });
+}
+
+export fn entry3() void {
+ _ = @sizeOf(packed struct(void) {
+ x: void,
+ });
+}
+
+export fn entry4() void {
+ _ = @sizeOf(packed struct(void) {});
+}
+
+export fn entry5() void {
+ _ = @sizeOf(packed struct(noreturn) {});
+}
+
+export fn entry6() void {
+ _ = @sizeOf(packed struct(f64) {
+ x: u32,
+ y: f32,
+ });
+}
+
+export fn entry7() void {
+ _ = @sizeOf(packed struct(*u32) {
+ x: u4,
+ y: u24,
+ z: u4,
+ });
+}
+
+// error
+// backend=llvm
+// target=native
+//
+// :2:31: error: backing integer type 'u32' has bit size 32 but the struct fields have a total bit size of 29
+// :9:31: error: backing integer type 'i31' has bit size 31 but the struct fields have a total bit size of 32
+// :17:31: error: expected backing integer type, found 'void'
+// :23:31: error: expected backing integer type, found 'void'
+// :27:31: error: expected backing integer type, found 'noreturn'
+// :31:31: error: expected backing integer type, found 'f64'
+// :38:31: error: expected backing integer type, found '*u32'
diff --git a/test/cases/compile_errors/popCount-non-integer.zig b/test/cases/compile_errors/popCount-non-integer.zig
index af1c79f5f4..b218416e39 100644
--- a/test/cases/compile_errors/popCount-non-integer.zig
+++ b/test/cases/compile_errors/popCount-non-integer.zig
@@ -1,9 +1,9 @@
export fn entry(x: f32) u32 {
- return @popCount(f32, x);
+ return @popCount(x);
}
// error
// backend=stage2
// target=native
//
-// :2:27: error: expected integer or vector, found 'f32'
+// :2:22: error: expected integer or vector, found 'f32'
diff --git a/test/cases/compile_errors/reify_type_for_exhaustive_enum_with_zero_fields.zig b/test/cases/compile_errors/reify_type_for_exhaustive_enum_with_zero_fields.zig
deleted file mode 100644
index 44876e938a..0000000000
--- a/test/cases/compile_errors/reify_type_for_exhaustive_enum_with_zero_fields.zig
+++ /dev/null
@@ -1,18 +0,0 @@
-const Tag = @Type(.{
- .Enum = .{
- .layout = .Auto,
- .tag_type = u1,
- .fields = &.{},
- .decls = &.{},
- .is_exhaustive = true,
- },
-});
-export fn entry() void {
- _ = @intToEnum(Tag, 0);
-}
-
-// error
-// backend=stage2
-// target=native
-//
-// :1:13: error: enums must have at least one field
diff --git a/test/cases/compile_errors/reify_type_for_tagged_union_with_extra_union_field.zig b/test/cases/compile_errors/reify_type_for_tagged_union_with_extra_union_field.zig
index ee557cd6c2..9d6170b9d0 100644
--- a/test/cases/compile_errors/reify_type_for_tagged_union_with_extra_union_field.zig
+++ b/test/cases/compile_errors/reify_type_for_tagged_union_with_extra_union_field.zig
@@ -31,5 +31,5 @@ export fn entry() void {
// backend=stage2
// target=native
//
-// :13:16: error: no field named 'arst' in enum 'tmp.Tag__enum_266'
+// :13:16: error: no field named 'arst' in enum 'tmp.Tag'
// :1:13: note: enum declared here
diff --git a/test/cases/compile_errors/reify_type_for_union_with_zero_fields.zig b/test/cases/compile_errors/reify_type_for_union_with_zero_fields.zig
deleted file mode 100644
index 0b4f395c81..0000000000
--- a/test/cases/compile_errors/reify_type_for_union_with_zero_fields.zig
+++ /dev/null
@@ -1,17 +0,0 @@
-const Untagged = @Type(.{
- .Union = .{
- .layout = .Auto,
- .tag_type = null,
- .fields = &.{},
- .decls = &.{},
- },
-});
-export fn entry() void {
- _ = Untagged{};
-}
-
-// error
-// backend=stage2
-// target=native
-//
-// :1:18: error: unions must have at least one field
diff --git a/test/cases/compile_errors/runtime_cast_to_union_which_has_non-void_fields.zig b/test/cases/compile_errors/runtime_cast_to_union_which_has_non-void_fields.zig
index c312d6db40..0142f422f4 100644
--- a/test/cases/compile_errors/runtime_cast_to_union_which_has_non-void_fields.zig
+++ b/test/cases/compile_errors/runtime_cast_to_union_which_has_non-void_fields.zig
@@ -18,6 +18,4 @@ fn foo(l: Letter) void {
//
// :11:20: error: runtime coercion from enum 'tmp.Letter' to union 'tmp.Value' which has non-void fields
// :3:5: note: field 'A' has type 'i32'
-// :4:5: note: field 'B' has type 'void'
-// :5:5: note: field 'C' has type 'void'
// :2:15: note: union declared here
diff --git a/test/cases/compile_errors/self_referential_struct_requires_comptime.zig b/test/cases/compile_errors/self_referential_struct_requires_comptime.zig
new file mode 100644
index 0000000000..3ce7571026
--- /dev/null
+++ b/test/cases/compile_errors/self_referential_struct_requires_comptime.zig
@@ -0,0 +1,18 @@
+const S = struct {
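+ // a bare 'fn () void' field (rather than '*const fn () void') makes the struct comptime-only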
+ a: fn () void,
+ b: *S,
+};
+pub export fn entry() void {
+ var s: S = undefined;
+ _ = s;
+}
+
+
+// error
+// backend=stage2
+// target=native
+//
+// :6:12: error: variable of type 'tmp.S' must be const or comptime
+// :2:8: note: struct requires comptime because of this field
+// :2:8: note: use '*const fn() void' for a function pointer type
+// :3:8: note: struct requires comptime because of this field
diff --git a/test/cases/compile_errors/self_referential_union_requires_comptime.zig b/test/cases/compile_errors/self_referential_union_requires_comptime.zig
new file mode 100644
index 0000000000..a2433adde9
--- /dev/null
+++ b/test/cases/compile_errors/self_referential_union_requires_comptime.zig
@@ -0,0 +1,17 @@
+const U = union {
+ a: fn () void,
+ b: *U,
+};
+pub export fn entry() void {
+ var u: U = undefined;
+ _ = u;
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :6:12: error: variable of type 'tmp.U' must be const or comptime
+// :2:8: note: union requires comptime because of this field
+// :2:8: note: use '*const fn() void' for a function pointer type
+// :3:8: note: union requires comptime because of this field
diff --git a/test/cases/compile_errors/stage1/obj/shlExact_shifts_out_1_bits.zig b/test/cases/compile_errors/shlExact_shifts_out_1_bits.zig
similarity index 60%
rename from test/cases/compile_errors/stage1/obj/shlExact_shifts_out_1_bits.zig
rename to test/cases/compile_errors/shlExact_shifts_out_1_bits.zig
index 953c5fec50..b2ab45a8e9 100644
--- a/test/cases/compile_errors/stage1/obj/shlExact_shifts_out_1_bits.zig
+++ b/test/cases/compile_errors/shlExact_shifts_out_1_bits.zig
@@ -4,7 +4,7 @@ comptime {
}
// error
-// backend=stage1
+// backend=llvm
// target=native
//
-// tmp.zig:2:15: error: operation caused overflow
+// :2:15: error: operation caused overflow
diff --git a/test/cases/compile_errors/stage1/obj/shrExact_shifts_out_1_bits.zig b/test/cases/compile_errors/shrExact_shifts_out_1_bits.zig
similarity index 58%
rename from test/cases/compile_errors/stage1/obj/shrExact_shifts_out_1_bits.zig
rename to test/cases/compile_errors/shrExact_shifts_out_1_bits.zig
index 223db76630..dd23c4bcb3 100644
--- a/test/cases/compile_errors/stage1/obj/shrExact_shifts_out_1_bits.zig
+++ b/test/cases/compile_errors/shrExact_shifts_out_1_bits.zig
@@ -4,7 +4,7 @@ comptime {
}
// error
-// backend=stage1
+// backend=llvm
// target=native
//
-// tmp.zig:2:15: error: exact shift shifted out 1 bits
+// :2:15: error: exact shift shifted out 1 bits
diff --git a/test/cases/compile_errors/signed_integer_division.zig b/test/cases/compile_errors/signed_integer_division.zig
new file mode 100644
index 0000000000..7e968ac77e
--- /dev/null
+++ b/test/cases/compile_errors/signed_integer_division.zig
@@ -0,0 +1,9 @@
+export fn foo(a: i32, b: i32) i32 {
+ return a / b;
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :2:14: error: division with 'i32' and 'i32': signed integers must use @divTrunc, @divFloor, or @divExact
diff --git a/test/cases/compile_errors/slice_of_non_array_type.zig b/test/cases/compile_errors/slice_of_non_array_type.zig
new file mode 100644
index 0000000000..734b026038
--- /dev/null
+++ b/test/cases/compile_errors/slice_of_non_array_type.zig
@@ -0,0 +1,9 @@
+comptime {
+ _ = 1[0..];
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :2:10: error: slice of non-array type 'comptime_int'
diff --git a/test/cases/compile_errors/stage1/obj/calling_function_with_naked_calling_convention.zig b/test/cases/compile_errors/stage1/obj/calling_function_with_naked_calling_convention.zig
deleted file mode 100644
index 401f84e687..0000000000
--- a/test/cases/compile_errors/stage1/obj/calling_function_with_naked_calling_convention.zig
+++ /dev/null
@@ -1,11 +0,0 @@
-export fn entry() void {
- foo();
-}
-fn foo() callconv(.Naked) void { }
-
-// error
-// backend=stage1
-// target=native
-//
-// tmp.zig:2:5: error: unable to call function with naked calling convention
-// tmp.zig:4:1: note: declared here
diff --git a/test/cases/compile_errors/stage1/obj/signed_integer_division.zig b/test/cases/compile_errors/stage1/obj/signed_integer_division.zig
deleted file mode 100644
index 3eebbf2248..0000000000
--- a/test/cases/compile_errors/stage1/obj/signed_integer_division.zig
+++ /dev/null
@@ -1,9 +0,0 @@
-export fn foo(a: i32, b: i32) i32 {
- return a / b;
-}
-
-// error
-// backend=stage1
-// target=native
-//
-// tmp.zig:2:14: error: division with 'i32' and 'i32': signed integers must use @divTrunc, @divFloor, or @divExact
diff --git a/test/cases/compile_errors/stage1/obj/wrong_number_of_arguments_for_method_fn_call.zig b/test/cases/compile_errors/stage1/obj/wrong_number_of_arguments_for_method_fn_call.zig
deleted file mode 100644
index 7371223863..0000000000
--- a/test/cases/compile_errors/stage1/obj/wrong_number_of_arguments_for_method_fn_call.zig
+++ /dev/null
@@ -1,14 +0,0 @@
-const Foo = struct {
- fn method(self: *const Foo, a: i32) void {_ = self; _ = a;}
-};
-fn f(foo: *const Foo) void {
-
- foo.method(1, 2);
-}
-export fn entry() usize { return @sizeOf(@TypeOf(f)); }
-
-// error
-// backend=stage1
-// target=native
-//
-// tmp.zig:6:15: error: expected 2 argument(s), found 3
diff --git a/test/cases/compile_errors/struct_init_passed_to_type_param.zig b/test/cases/compile_errors/struct_init_passed_to_type_param.zig
new file mode 100644
index 0000000000..b00c27986f
--- /dev/null
+++ b/test/cases/compile_errors/struct_init_passed_to_type_param.zig
@@ -0,0 +1,14 @@
+const MyStruct = struct { x: i32 };
+
+fn hi(comptime T: type) usize {
+ return @sizeOf(T);
+}
+
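+// MyStruct{ .x = 12 } is a value of type MyStruct, not a type, so it cannot
+// coerce to the 'type' parameter.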
+export const value = hi(MyStruct{ .x = 12 });
+
+// error
+// backend=stage2
+// target=native
+//
+// :7:33: error: expected type 'type', found 'tmp.MyStruct'
+// :1:18: note: struct declared here
diff --git a/test/cases/compile_errors/switch_on_slice.zig b/test/cases/compile_errors/switch_on_slice.zig
new file mode 100644
index 0000000000..b4644b132c
--- /dev/null
+++ b/test/cases/compile_errors/switch_on_slice.zig
@@ -0,0 +1,13 @@
+pub export fn entry() void {
+ var a: [:0]const u8 = "foo";
+ switch (a) {
+ "--version", "version" => unreachable,
+ else => {},
+ }
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :3:13: error: switch on type '[:0]const u8'
diff --git a/test/cases/compile_errors/tagName_on_invalid_value_of_non-exhaustive_enum.zig b/test/cases/compile_errors/tagName_on_invalid_value_of_non-exhaustive_enum.zig
index e9e63f0d7a..1de0d1c145 100644
--- a/test/cases/compile_errors/tagName_on_invalid_value_of_non-exhaustive_enum.zig
+++ b/test/cases/compile_errors/tagName_on_invalid_value_of_non-exhaustive_enum.zig
@@ -9,4 +9,4 @@ test "enum" {
// is_test=1
//
// :3:9: error: no field with value '5' in enum 'test.enum.E'
-// :1:1: note: declared here
+// :2:15: note: declared here
diff --git a/test/cases/compile_errors/union_fields_with_value_assignments.zig b/test/cases/compile_errors/union_fields_with_value_assignments.zig
deleted file mode 100644
index 2121568dd2..0000000000
--- a/test/cases/compile_errors/union_fields_with_value_assignments.zig
+++ /dev/null
@@ -1,7 +0,0 @@
-const Foo = union {};
-
-// error
-// backend=stage2
-// target=native
-//
-// :1:13: error: union declarations must have at least one tag
diff --git a/test/cases/compile_errors/union_noreturn_field_initialized.zig b/test/cases/compile_errors/union_noreturn_field_initialized.zig
new file mode 100644
index 0000000000..66304d6a74
--- /dev/null
+++ b/test/cases/compile_errors/union_noreturn_field_initialized.zig
@@ -0,0 +1,43 @@
+pub export fn entry1() void {
+ const U = union(enum) {
+ a: u32,
+ b: noreturn,
+ fn foo(_: @This()) void {}
+ fn bar() noreturn {
+ unreachable;
+ }
+ };
+
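+ // a 'noreturn' field may be declared, but initializing it is a compile error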
+ var a = U{ .b = undefined };
+ _ = a;
+}
+pub export fn entry2() void {
+ const U = union(enum) {
+ a: noreturn,
+ };
+ var u: U = undefined;
+ u = .a;
+}
+pub export fn entry3() void {
+ const U = union(enum) {
+ a: noreturn,
+ b: void,
+ };
+ var e = @typeInfo(U).Union.tag_type.?.a;
+ var u: U = undefined;
+ u = e;
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :11:21: error: cannot initialize 'noreturn' field of union
+// :4:9: note: field 'b' declared here
+// :2:15: note: union declared here
+// :19:10: error: cannot initialize 'noreturn' field of union
+// :16:9: note: field 'a' declared here
+// :15:15: note: union declared here
+// :28:9: error: runtime coercion from enum '@typeInfo(tmp.entry3.U).Union.tag_type.?' to union 'tmp.entry3.U' which has a 'noreturn' field
+// :23:9: note: 'noreturn' field here
+// :22:15: note: union declared here
diff --git a/test/cases/compile_errors/union_with_0_fields.zig b/test/cases/compile_errors/union_with_0_fields.zig
deleted file mode 100644
index 2121568dd2..0000000000
--- a/test/cases/compile_errors/union_with_0_fields.zig
+++ /dev/null
@@ -1,7 +0,0 @@
-const Foo = union {};
-
-// error
-// backend=stage2
-// target=native
-//
-// :1:13: error: union declarations must have at least one tag
diff --git a/test/cases/compile_errors/using_invalid_types_in_function_call_raises_an_error.zig b/test/cases/compile_errors/using_invalid_types_in_function_call_raises_an_error.zig
deleted file mode 100644
index ee6d1b8b7c..0000000000
--- a/test/cases/compile_errors/using_invalid_types_in_function_call_raises_an_error.zig
+++ /dev/null
@@ -1,11 +0,0 @@
-const MenuEffect = enum {};
-fn func(effect: MenuEffect) void { _ = effect; }
-export fn entry() void {
- func(MenuEffect.ThisDoesNotExist);
-}
-
-// error
-// backend=stage2
-// target=native
-//
-// :1:20: error: enum declarations must have at least one tag
diff --git a/test/cases/compile_errors/wrong_number_of_arguments.zig b/test/cases/compile_errors/wrong_number_of_arguments.zig
index 64eb11650a..05d761de18 100644
--- a/test/cases/compile_errors/wrong_number_of_arguments.zig
+++ b/test/cases/compile_errors/wrong_number_of_arguments.zig
@@ -7,4 +7,5 @@ fn c(d: i32, e: i32, f: i32) void { _ = d; _ = e; _ = f; }
// backend=stage2
// target=native
//
-// :2:6: error: expected 3 argument(s), found 1
+// :2:5: error: expected 3 argument(s), found 1
+// :4:1: note: function declared here
diff --git a/test/cases/compile_errors/wrong_number_of_arguments_for_method_fn_call.zig b/test/cases/compile_errors/wrong_number_of_arguments_for_method_fn_call.zig
new file mode 100644
index 0000000000..da6a7be4fa
--- /dev/null
+++ b/test/cases/compile_errors/wrong_number_of_arguments_for_method_fn_call.zig
@@ -0,0 +1,15 @@
+const Foo = struct {
+ fn method(self: *const Foo, a: i32) void {_ = self; _ = a;}
+};
+fn f(foo: *const Foo) void {
+
+ foo.method(1, 2);
+}
+export fn entry() usize { return @sizeOf(@TypeOf(&f)); }
+
+// error
+// backend=stage2
+// target=native
+//
+// :6:8: error: member function expected 1 argument(s), found 2
+// :2:5: note: function declared here
diff --git a/test/cases/error_in_nested_declaration.zig b/test/cases/error_in_nested_declaration.zig
new file mode 100644
index 0000000000..3fff746909
--- /dev/null
+++ b/test/cases/error_in_nested_declaration.zig
@@ -0,0 +1,31 @@
+const S = struct {
+ b: u32,
+ c: i32,
+ a: struct {
+ pub fn str(_: @This(), extra: []u32) []i32 {
+ return @bitCast([]i32, extra);
+ }
+ },
+};
+
+pub export fn entry() void {
+ var s: S = undefined;
+ _ = s.a.str(undefined);
+}
+
+const S2 = struct {
+ a: [*c]anyopaque,
+};
+
+pub export fn entry2() void {
+ var s: S2 = undefined;
+ _ = s;
+}
+
+// error
+// backend=llvm
+// target=native
+//
+// :17:12: error: C pointers cannot point to opaque types
+// :6:29: error: cannot @bitCast to '[]i32'
+// :6:29: note: use @ptrCast to cast from '[]u32'
diff --git a/test/cases/riscv64-linux/hello_world_with_updates.0.zig b/test/cases/riscv64-linux/hello_world_with_updates.0.zig
deleted file mode 100644
index dd119fd1f4..0000000000
--- a/test/cases/riscv64-linux/hello_world_with_updates.0.zig
+++ /dev/null
@@ -1,21 +0,0 @@
-pub fn main() void {
- print();
-}
-
-fn print() void {
- asm volatile ("ecall"
- :
- : [number] "{a7}" (64),
- [arg1] "{a0}" (1),
- [arg2] "{a1}" (@ptrToInt("Hello, World!\n")),
- [arg3] "{a2}" ("Hello, World!\n".len),
- : "rcx", "r11", "memory"
- );
- return;
-}
-
-// run
-// target=riscv64-linux
-//
-// Hello, World!
-//
diff --git a/test/cases/riscv64-linux/hello_world_with_updates.1.zig b/test/cases/riscv64-linux/hello_world_with_updates.1.zig
deleted file mode 100644
index 26718738a9..0000000000
--- a/test/cases/riscv64-linux/hello_world_with_updates.1.zig
+++ /dev/null
@@ -1,27 +0,0 @@
-pub fn main() void {
- print();
- print();
- print();
- print();
-}
-
-fn print() void {
- asm volatile ("ecall"
- :
- : [number] "{a7}" (64),
- [arg1] "{a0}" (1),
- [arg2] "{a1}" (@ptrToInt("Hello, World!\n")),
- [arg3] "{a2}" ("Hello, World!\n".len),
- : "rcx", "r11", "memory"
- );
- return;
-}
-
-// run
-// target=riscv64-linux
-//
-// Hello, World!
-// Hello, World!
-// Hello, World!
-// Hello, World!
-//
diff --git a/test/cases/safety/@intToEnum - no matching tag value.zig b/test/cases/safety/@intToEnum - no matching tag value.zig
index 79fcf33bc6..0e5f401f6d 100644
--- a/test/cases/safety/@intToEnum - no matching tag value.zig
+++ b/test/cases/safety/@intToEnum - no matching tag value.zig
@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
- _ = message;
_ = stack_trace;
- std.process.exit(0);
+ if (std.mem.eql(u8, message, "invalid enum value")) {
+ std.process.exit(0);
+ }
+ std.process.exit(1);
}
const Foo = enum {
A,
@@ -18,6 +20,7 @@ fn bar(a: u2) Foo {
return @intToEnum(Foo, a);
}
fn baz(_: Foo) void {}
+
// run
-// backend=stage1
+// backend=llvm
// target=native
diff --git a/test/cases/safety/@tagName on corrupted enum value.zig b/test/cases/safety/@tagName on corrupted enum value.zig
index 507157911e..4081d171c4 100644
--- a/test/cases/safety/@tagName on corrupted enum value.zig
+++ b/test/cases/safety/@tagName on corrupted enum value.zig
@@ -10,6 +10,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noretur
const E = enum(u32) {
X = 1,
+ Y = 2,
};
pub fn main() !void {
@@ -21,5 +22,5 @@ pub fn main() !void {
}
// run
-// backend=stage1
+// backend=llvm
// target=native
diff --git a/test/cases/safety/@tagName on corrupted union value.zig b/test/cases/safety/@tagName on corrupted union value.zig
index 0c35b5ef3d..eb36fab262 100644
--- a/test/cases/safety/@tagName on corrupted union value.zig
+++ b/test/cases/safety/@tagName on corrupted union value.zig
@@ -10,6 +10,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noretur
const U = union(enum(u32)) {
X: u8,
+ Y: i8,
};
pub fn main() !void {
@@ -22,5 +23,5 @@ pub fn main() !void {
}
// run
-// backend=stage1
+// backend=llvm
// target=native
diff --git a/test/cases/safety/cast []u8 to bigger slice of wrong size.zig b/test/cases/safety/cast []u8 to bigger slice of wrong size.zig
index 588801b27e..6fddb63bee 100644
--- a/test/cases/safety/cast []u8 to bigger slice of wrong size.zig
+++ b/test/cases/safety/cast []u8 to bigger slice of wrong size.zig
@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
- _ = message;
_ = stack_trace;
- std.process.exit(0);
+ if (std.mem.eql(u8, message, "exact division produced remainder")) {
+ std.process.exit(0);
+ }
+ std.process.exit(1);
}
pub fn main() !void {
@@ -15,5 +17,5 @@ fn widenSlice(slice: []align(1) const u8) []align(1) const i32 {
return std.mem.bytesAsSlice(i32, slice);
}
// run
-// backend=stage1
-// target=native
\ No newline at end of file
+// backend=llvm
+// target=native
diff --git a/test/cases/safety/empty slice with sentinel out of bounds.zig b/test/cases/safety/empty slice with sentinel out of bounds.zig
index ad8010868a..d989a33541 100644
--- a/test/cases/safety/empty slice with sentinel out of bounds.zig
+++ b/test/cases/safety/empty slice with sentinel out of bounds.zig
@@ -2,7 +2,7 @@ const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = stack_trace;
- if (std.mem.eql(u8, message, "index out of bounds")) {
+ if (std.mem.eql(u8, message, "index out of bounds: index 1, len 0")) {
std.process.exit(0);
}
std.process.exit(1);
@@ -17,5 +17,5 @@ pub fn main() !void {
}
// run
-// backend=stage1
+// backend=llvm
// target=native
diff --git a/test/cases/safety/modrem by zero.zig b/test/cases/safety/modrem by zero.zig
new file mode 100644
index 0000000000..435570f2fb
--- /dev/null
+++ b/test/cases/safety/modrem by zero.zig
@@ -0,0 +1,20 @@
+const std = @import("std");
+
+pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
+ _ = stack_trace;
+ if (std.mem.eql(u8, message, "division by zero")) {
+ std.process.exit(0);
+ }
+ std.process.exit(1);
+}
+pub fn main() !void {
+ const x = div0(999, 0);
+ _ = x;
+ return error.TestFailed;
+}
+fn div0(a: u32, b: u32) u32 {
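+ // a runtime zero divisor triggers the "division by zero" safety panic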
+ return a / b;
+}
+// run
+// backend=llvm
+// target=native
diff --git a/test/cases/safety/modulus by zero.zig b/test/cases/safety/modulus by zero.zig
new file mode 100644
index 0000000000..9d57865a87
--- /dev/null
+++ b/test/cases/safety/modulus by zero.zig
@@ -0,0 +1,20 @@
+const std = @import("std");
+
+pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
+ _ = stack_trace;
+ if (std.mem.eql(u8, message, "division by zero")) {
+ std.process.exit(0);
+ }
+ std.process.exit(1);
+}
+pub fn main() !void {
+ const x = mod0(999, 0);
+ _ = x;
+ return error.TestFailed;
+}
+fn mod0(a: i32, b: i32) i32 {
+ return @mod(a, b);
+}
+// run
+// backend=llvm
+// target=native
diff --git a/test/cases/safety/out of bounds slice access.zig b/test/cases/safety/out of bounds slice access.zig
index a30532aee7..ddd9e74cf2 100644
--- a/test/cases/safety/out of bounds slice access.zig
+++ b/test/cases/safety/out of bounds slice access.zig
@@ -2,20 +2,20 @@ const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = stack_trace;
- if (std.mem.eql(u8, message, "attempt to index out of bound: index 4, len 4")) {
+ if (std.mem.eql(u8, message, "index out of bounds: index 4, len 4")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
- const a = [_]i32{1, 2, 3, 4};
+ const a = [_]i32{ 1, 2, 3, 4 };
baz(bar(&a));
return error.TestFailed;
}
fn bar(a: []const i32) i32 {
return a[4];
}
-fn baz(_: i32) void { }
+fn baz(_: i32) void {}
// run
// backend=llvm
// target=native
diff --git a/test/cases/safety/pointer casting null to non-optional pointer.zig b/test/cases/safety/pointer casting null to non-optional pointer.zig
index 0254e002ad..e46b84f783 100644
--- a/test/cases/safety/pointer casting null to non-optional pointer.zig
+++ b/test/cases/safety/pointer casting null to non-optional pointer.zig
@@ -1,16 +1,20 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
- _ = message;
_ = stack_trace;
- std.process.exit(0);
+ if (std.mem.eql(u8, message, "cast causes pointer to be null")) {
+ std.process.exit(0);
+ }
+ std.process.exit(1);
}
+
pub fn main() !void {
var c_ptr: [*c]u8 = 0;
var zig_ptr: *u8 = c_ptr;
_ = zig_ptr;
return error.TestFailed;
}
+
// run
-// backend=stage1
+// backend=llvm
// target=native
diff --git a/test/cases/safety/pointer slice sentinel mismatch.zig b/test/cases/safety/pointer slice sentinel mismatch.zig
index f79e2a860c..ec25ec2969 100644
--- a/test/cases/safety/pointer slice sentinel mismatch.zig
+++ b/test/cases/safety/pointer slice sentinel mismatch.zig
@@ -2,14 +2,14 @@ const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = stack_trace;
- if (std.mem.eql(u8, message, "sentinel mismatch")) {
+ if (std.mem.eql(u8, message, "sentinel mismatch: expected 0, found 4")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
- var buf: [4]u8 = undefined;
+ var buf: [4]u8 = .{ 1, 2, 3, 4 };
const ptr: [*]u8 = &buf;
const slice = ptr[0..3 :0];
_ = slice;
@@ -17,5 +17,5 @@ pub fn main() !void {
}
// run
-// backend=stage1
+// backend=llvm
// target=native
diff --git a/test/cases/safety/remainder division by negative number.zig b/test/cases/safety/remainder division by zero.zig
similarity index 69%
rename from test/cases/safety/remainder division by negative number.zig
rename to test/cases/safety/remainder division by zero.zig
index 2edbf4509c..71e295c4dd 100644
--- a/test/cases/safety/remainder division by negative number.zig
+++ b/test/cases/safety/remainder division by zero.zig
@@ -2,17 +2,17 @@ const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = stack_trace;
- if (std.mem.eql(u8, message, "remainder division by zero or negative value")) {
+ if (std.mem.eql(u8, message, "division by zero")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
- const x = div0(999, -1);
+ const x = rem0(999, 0);
_ = x;
return error.TestFailed;
}
-fn div0(a: i32, b: i32) i32 {
+fn rem0(a: i32, b: i32) i32 {
return @rem(a, b);
}
// run
diff --git a/test/cases/safety/shift left by huge amount.zig b/test/cases/safety/shift left by huge amount.zig
index b1159b7d75..e786a739d6 100644
--- a/test/cases/safety/shift left by huge amount.zig
+++ b/test/cases/safety/shift left by huge amount.zig
@@ -17,5 +17,5 @@ pub fn main() !void {
}
// run
-// backend=stage1
+// backend=llvm
// target=native
diff --git a/test/cases/safety/shift right by huge amount.zig b/test/cases/safety/shift right by huge amount.zig
index 2c39011240..a45b8c24ce 100644
--- a/test/cases/safety/shift right by huge amount.zig
+++ b/test/cases/safety/shift right by huge amount.zig
@@ -17,5 +17,5 @@ pub fn main() !void {
}
// run
-// backend=stage1
+// backend=llvm
// target=native
diff --git a/test/cases/safety/signed integer division overflow - vectors.zig b/test/cases/safety/signed integer division overflow - vectors.zig
index d59adeb698..8bc5be0d63 100644
--- a/test/cases/safety/signed integer division overflow - vectors.zig
+++ b/test/cases/safety/signed integer division overflow - vectors.zig
@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
- _ = message;
_ = stack_trace;
- std.process.exit(0);
+ if (std.mem.eql(u8, message, "integer overflow")) {
+ std.process.exit(0);
+ }
+ std.process.exit(1);
}
pub fn main() !void {
@@ -17,5 +19,5 @@ fn div(a: @Vector(4, i16), b: @Vector(4, i16)) @Vector(4, i16) {
return @divTrunc(a, b);
}
// run
-// backend=stage1
-// target=native
\ No newline at end of file
+// backend=llvm
+// target=native
diff --git a/test/cases/safety/signed integer division overflow.zig b/test/cases/safety/signed integer division overflow.zig
index a46f175487..6d17c284df 100644
--- a/test/cases/safety/signed integer division overflow.zig
+++ b/test/cases/safety/signed integer division overflow.zig
@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
- _ = message;
_ = stack_trace;
- std.process.exit(0);
+ if (std.mem.eql(u8, message, "integer overflow")) {
+ std.process.exit(0);
+ }
+ std.process.exit(1);
}
pub fn main() !void {
@@ -15,5 +17,5 @@ fn div(a: i16, b: i16) i16 {
return @divTrunc(a, b);
}
// run
-// backend=stage1
-// target=native
\ No newline at end of file
+// backend=llvm
+// target=native
diff --git a/test/cases/safety/slice sentinel mismatch - floats.zig b/test/cases/safety/slice sentinel mismatch - floats.zig
index 3295c20db3..3d08872ca7 100644
--- a/test/cases/safety/slice sentinel mismatch - floats.zig
+++ b/test/cases/safety/slice sentinel mismatch - floats.zig
@@ -2,19 +2,19 @@ const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = stack_trace;
- if (std.mem.eql(u8, message, "sentinel mismatch")) {
+ if (std.mem.eql(u8, message, "sentinel mismatch: expected 1.20000004e+00, found 4.0e+00")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
- var buf: [4]f32 = undefined;
+ var buf: [4]f32 = .{ 1, 2, 3, 4 };
const slice = buf[0..3 :1.2];
_ = slice;
return error.TestFailed;
}
// run
-// backend=stage1
+// backend=llvm
// target=native
diff --git a/test/cases/safety/slice sentinel mismatch - optional pointers.zig b/test/cases/safety/slice sentinel mismatch - optional pointers.zig
index ecb82c61d4..fbc6fcf428 100644
--- a/test/cases/safety/slice sentinel mismatch - optional pointers.zig
+++ b/test/cases/safety/slice sentinel mismatch - optional pointers.zig
@@ -2,19 +2,19 @@ const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = stack_trace;
- if (std.mem.eql(u8, message, "sentinel mismatch")) {
+ if (std.mem.eql(u8, message, "sentinel mismatch: expected null, found i32@10")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
- var buf: [4]?*i32 = undefined;
+ var buf: [4]?*i32 = .{ @intToPtr(*i32, 4), @intToPtr(*i32, 8), @intToPtr(*i32, 12), @intToPtr(*i32, 16) };
const slice = buf[0..3 :null];
_ = slice;
return error.TestFailed;
}
// run
-// backend=stage1
+// backend=llvm
// target=native
diff --git a/test/cases/safety/slice slice sentinel mismatch.zig b/test/cases/safety/slice slice sentinel mismatch.zig
index 13b331a0f4..b1bca1a11f 100644
--- a/test/cases/safety/slice slice sentinel mismatch.zig
+++ b/test/cases/safety/slice slice sentinel mismatch.zig
@@ -2,18 +2,18 @@ const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = stack_trace;
- if (std.mem.eql(u8, message, "sentinel mismatch")) {
+ if (std.mem.eql(u8, message, "sentinel mismatch: expected 0, found 4")) {
std.process.exit(0);
}
std.process.exit(1);
}
pub fn main() !void {
- var buf: [4]u8 = undefined;
+ var buf: [4]u8 = .{ 1, 2, 3, 4 };
const slice = buf[0..];
const slice2 = slice[0..3 :0];
_ = slice2;
return error.TestFailed;
}
// run
-// backend=stage1
+// backend=llvm
// target=native
diff --git a/test/cases/safety/slice with sentinel out of bounds - runtime len.zig b/test/cases/safety/slice with sentinel out of bounds - runtime len.zig
new file mode 100644
index 0000000000..524c69d7b7
--- /dev/null
+++ b/test/cases/safety/slice with sentinel out of bounds - runtime len.zig
@@ -0,0 +1,22 @@
+const std = @import("std");
+
+pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
+ _ = stack_trace;
+ if (std.mem.eql(u8, message, "index out of bounds: index 5, len 4")) {
+ std.process.exit(0);
+ }
+ std.process.exit(1);
+}
+
+pub fn main() !void {
+ var buf = [4]u8{ 'a', 'b', 'c', 0 };
+ const input: []u8 = &buf;
+ var len: usize = 4;
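+ // with len = 4 the 0 sentinel would sit at index 4, past the end of buf,
+ // so the bounds check fails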
+ const slice = input[0..len :0];
+ _ = slice;
+ return error.TestFailed;
+}
+
+// run
+// backend=llvm
+// target=native
diff --git a/test/cases/safety/slice with sentinel out of bounds.zig b/test/cases/safety/slice with sentinel out of bounds.zig
index 1ca83ea481..636235a5b3 100644
--- a/test/cases/safety/slice with sentinel out of bounds.zig
+++ b/test/cases/safety/slice with sentinel out of bounds.zig
@@ -2,7 +2,7 @@ const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = stack_trace;
- if (std.mem.eql(u8, message, "index out of bounds")) {
+ if (std.mem.eql(u8, message, "index out of bounds: index 5, len 4")) {
std.process.exit(0);
}
std.process.exit(1);
@@ -17,5 +17,5 @@ pub fn main() !void {
}
// run
-// backend=stage1
+// backend=llvm
// target=native
diff --git a/test/cases/safety/slicing null C pointer - runtime len.zig b/test/cases/safety/slicing null C pointer - runtime len.zig
new file mode 100644
index 0000000000..2767253612
--- /dev/null
+++ b/test/cases/safety/slicing null C pointer - runtime len.zig
@@ -0,0 +1,20 @@
+const std = @import("std");
+
+pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
+ _ = stack_trace;
+ if (std.mem.eql(u8, message, "attempt to use null value")) {
+ std.process.exit(0);
+ }
+ std.process.exit(1);
+}
+
+pub fn main() !void {
+ var ptr: [*c]const u32 = null;
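+ // slicing a null C pointer trips the "attempt to use null value" safety check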
+ var len: usize = 3;
+ var slice = ptr[0..len];
+ _ = slice;
+ return error.TestFailed;
+}
+// run
+// backend=llvm
+// target=native
diff --git a/test/cases/safety/slicing null C pointer.zig b/test/cases/safety/slicing null C pointer.zig
index db8d235c45..f5041adae7 100644
--- a/test/cases/safety/slicing null C pointer.zig
+++ b/test/cases/safety/slicing null C pointer.zig
@@ -1,9 +1,11 @@
const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
- _ = message;
_ = stack_trace;
- std.process.exit(0);
+ if (std.mem.eql(u8, message, "attempt to use null value")) {
+ std.process.exit(0);
+ }
+ std.process.exit(1);
}
pub fn main() !void {
@@ -13,5 +15,5 @@ pub fn main() !void {
return error.TestFailed;
}
// run
-// backend=stage1
-// target=native
\ No newline at end of file
+// backend=llvm
+// target=native
diff --git a/test/cases/safety/switch on corrupted enum value.zig b/test/cases/safety/switch on corrupted enum value.zig
index dc7b9b3abf..fd94976763 100644
--- a/test/cases/safety/switch on corrupted enum value.zig
+++ b/test/cases/safety/switch on corrupted enum value.zig
@@ -2,7 +2,7 @@ const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = stack_trace;
- if (std.mem.eql(u8, message, "reached unreachable code")) {
+ if (std.mem.eql(u8, message, "switch on corrupt value")) {
std.process.exit(0);
}
std.process.exit(1);
@@ -10,17 +10,18 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noretur
const E = enum(u32) {
X = 1,
+ Y = 2,
};
pub fn main() !void {
var e: E = undefined;
@memset(@ptrCast([*]u8, &e), 0x55, @sizeOf(E));
switch (e) {
- .X => @breakpoint(),
+ .X, .Y => @breakpoint(),
}
return error.TestFailed;
}
// run
-// backend=stage1
+// backend=llvm
// target=native
diff --git a/test/cases/safety/switch on corrupted union value.zig b/test/cases/safety/switch on corrupted union value.zig
index 0fadad3c7e..059f0dc042 100644
--- a/test/cases/safety/switch on corrupted union value.zig
+++ b/test/cases/safety/switch on corrupted union value.zig
@@ -2,7 +2,7 @@ const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
_ = stack_trace;
- if (std.mem.eql(u8, message, "reached unreachable code")) {
+ if (std.mem.eql(u8, message, "switch on corrupt value")) {
std.process.exit(0);
}
std.process.exit(1);
@@ -10,17 +10,18 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noretur
const U = union(enum(u32)) {
X: u8,
+ Y: i8,
};
pub fn main() !void {
var u: U = undefined;
@memset(@ptrCast([*]u8, &u), 0x55, @sizeOf(U));
switch (u) {
- .X => @breakpoint(),
+ .X, .Y => @breakpoint(),
}
return error.TestFailed;
}
// run
-// backend=stage1
+// backend=llvm
// target=native
diff --git a/test/cases/safety/zero casted to error.zig b/test/cases/safety/zero casted to error.zig
new file mode 100644
index 0000000000..3a2edf834a
--- /dev/null
+++ b/test/cases/safety/zero casted to error.zig
@@ -0,0 +1,19 @@
+const std = @import("std");
+
+pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace) noreturn {
+ _ = stack_trace;
+ if (std.mem.eql(u8, message, "invalid error code")) {
+ std.process.exit(0);
+ }
+ std.process.exit(1);
+}
+pub fn main() !void {
+ bar(0) catch {};
+ return error.TestFailed;
+}
+fn bar(x: u16) anyerror {
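+ // 0 is never a valid error code, so @intToError(0) fails the safety check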
+ return @intToError(x);
+}
+// run
+// backend=llvm
+// target=native
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 180cf74bcb..60de07d1e3 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -184,7 +184,7 @@ pub fn addCases(ctx: *TestContext) !void {
}
{
- const case = ctx.obj("argument causes error ", .{});
+ const case = ctx.obj("argument causes error", .{});
case.backend = .stage2;
case.addSourceFile("b.zig",
@@ -204,6 +204,24 @@ pub fn addCases(ctx: *TestContext) !void {
, &[_][]const u8{
":3:12: error: unable to resolve comptime value",
":3:12: note: argument to function being called at comptime must be comptime known",
+ ":2:55: note: generic function is instantiated with a comptime only return type",
+ });
+ }
+
+ {
+ const case = ctx.obj("astgen failure in file struct", .{});
+ case.backend = .stage2;
+
+ case.addSourceFile("b.zig",
+ \\bad
+ );
+
+ case.addError(
+ \\pub export fn entry() void {
+ \\ _ = (@sizeOf(@import("b.zig")));
+ \\}
+ , &[_][]const u8{
+ ":1:1: error: struct field missing type",
});
}
diff --git a/test/link.zig b/test/link.zig
index 1e75620919..215a0511fc 100644
--- a/test/link.zig
+++ b/test/link.zig
@@ -23,11 +23,12 @@ pub fn addCases(cases: *tests.StandaloneContext) void {
.build_modes = true,
});
- cases.addBuildFile("test/link/tls/build.zig", .{
- .build_modes = true,
- });
+ addWasmCases(cases);
+ addMachOCases(cases);
+}
- cases.addBuildFile("test/link/wasm/type/build.zig", .{
+fn addWasmCases(cases: *tests.StandaloneContext) void {
+ cases.addBuildFile("test/link/wasm/bss/build.zig", .{
.build_modes = true,
.requires_stage2 = true,
});
@@ -42,23 +43,18 @@ pub fn addCases(cases: *tests.StandaloneContext) void {
.requires_stage2 = true,
});
- cases.addBuildFile("test/link/wasm/bss/build.zig", .{
+ cases.addBuildFile("test/link/wasm/type/build.zig", .{
.build_modes = true,
.requires_stage2 = true,
});
- cases.addBuildFile("test/link/macho/entry/build.zig", .{
- .build_modes = true,
- });
-
- cases.addBuildFile("test/link/macho/pagezero/build.zig", .{
- .build_modes = false,
- });
-
- cases.addBuildFile("test/link/macho/dylib/build.zig", .{
+ cases.addBuildFile("test/link/wasm/archive/build.zig", .{
.build_modes = true,
+ .requires_stage2 = true,
});
+}
+fn addMachOCases(cases: *tests.StandaloneContext) void {
cases.addBuildFile("test/link/macho/dead_strip/build.zig", .{
.build_modes = false,
});
@@ -68,41 +64,11 @@ pub fn addCases(cases: *tests.StandaloneContext) void {
.requires_macos_sdk = true,
});
- cases.addBuildFile("test/link/macho/needed_library/build.zig", .{
+ cases.addBuildFile("test/link/macho/dylib/build.zig", .{
.build_modes = true,
});
- cases.addBuildFile("test/link/macho/weak_library/build.zig", .{
- .build_modes = true,
- });
-
- cases.addBuildFile("test/link/macho/needed_framework/build.zig", .{
- .build_modes = true,
- .requires_macos_sdk = true,
- });
-
- cases.addBuildFile("test/link/macho/weak_framework/build.zig", .{
- .build_modes = true,
- .requires_macos_sdk = true,
- });
-
- // Try to build and run an Objective-C executable.
- cases.addBuildFile("test/link/macho/objc/build.zig", .{
- .build_modes = true,
- .requires_macos_sdk = true,
- });
-
- // Try to build and run an Objective-C++ executable.
- cases.addBuildFile("test/link/macho/objcpp/build.zig", .{
- .build_modes = true,
- .requires_macos_sdk = true,
- });
-
- cases.addBuildFile("test/link/macho/stack_size/build.zig", .{
- .build_modes = true,
- });
-
- cases.addBuildFile("test/link/macho/search_strategy/build.zig", .{
+ cases.addBuildFile("test/link/macho/entry/build.zig", .{
.build_modes = true,
});
@@ -110,4 +76,48 @@ pub fn addCases(cases: *tests.StandaloneContext) void {
.build_modes = true,
.requires_macos_sdk = true,
});
+
+ cases.addBuildFile("test/link/macho/needed_framework/build.zig", .{
+ .build_modes = true,
+ .requires_macos_sdk = true,
+ });
+
+ cases.addBuildFile("test/link/macho/needed_library/build.zig", .{
+ .build_modes = true,
+ });
+
+ cases.addBuildFile("test/link/macho/objc/build.zig", .{
+ .build_modes = true,
+ .requires_macos_sdk = true,
+ });
+
+ cases.addBuildFile("test/link/macho/objcpp/build.zig", .{
+ .build_modes = true,
+ .requires_macos_sdk = true,
+ });
+
+ cases.addBuildFile("test/link/macho/pagezero/build.zig", .{
+ .build_modes = false,
+ });
+
+ cases.addBuildFile("test/link/macho/search_strategy/build.zig", .{
+ .build_modes = true,
+ });
+
+ cases.addBuildFile("test/link/macho/stack_size/build.zig", .{
+ .build_modes = true,
+ });
+
+ cases.addBuildFile("test/link/macho/tls/build.zig", .{
+ .build_modes = true,
+ });
+
+ cases.addBuildFile("test/link/macho/weak_library/build.zig", .{
+ .build_modes = true,
+ });
+
+ cases.addBuildFile("test/link/macho/weak_framework/build.zig", .{
+ .build_modes = true,
+ .requires_macos_sdk = true,
+ });
}
diff --git a/test/link/macho/dead_strip/build.zig b/test/link/macho/dead_strip/build.zig
index dea225dd6f..25759f5619 100644
--- a/test/link/macho/dead_strip/build.zig
+++ b/test/link/macho/dead_strip/build.zig
@@ -4,13 +4,14 @@ const LibExeObjectStep = std.build.LibExeObjStep;
pub fn build(b: *Builder) void {
const mode = b.standardReleaseOptions();
+ const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const test_step = b.step("test", "Test the program");
test_step.dependOn(b.getInstallStep());
{
// Without -dead_strip, we expect `iAmUnused` symbol present
- const exe = createScenario(b, mode);
+ const exe = createScenario(b, mode, target);
const check = exe.checkObject(.macho);
check.checkInSymtab();
@@ -23,7 +24,7 @@ pub fn build(b: *Builder) void {
{
// With -dead_strip, no `iAmUnused` symbol should be present
- const exe = createScenario(b, mode);
+ const exe = createScenario(b, mode, target);
exe.link_gc_sections = true;
const check = exe.checkObject(.macho);
@@ -36,10 +37,11 @@ pub fn build(b: *Builder) void {
}
}
-fn createScenario(b: *Builder, mode: std.builtin.Mode) *LibExeObjectStep {
+fn createScenario(b: *Builder, mode: std.builtin.Mode, target: std.zig.CrossTarget) *LibExeObjectStep {
const exe = b.addExecutable("test", null);
exe.addCSourceFile("main.c", &[0][]const u8{});
exe.setBuildMode(mode);
+ exe.setTarget(target);
exe.linkLibC();
return exe;
}
diff --git a/test/link/macho/pagezero/build.zig b/test/link/macho/pagezero/build.zig
index 9dbc0e6473..5a7044d960 100644
--- a/test/link/macho/pagezero/build.zig
+++ b/test/link/macho/pagezero/build.zig
@@ -3,13 +3,14 @@ const Builder = std.build.Builder;
pub fn build(b: *Builder) void {
const mode = b.standardReleaseOptions();
+ const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
{
const exe = b.addExecutable("pagezero", null);
- exe.setTarget(.{ .os_tag = .macos });
+ exe.setTarget(target);
exe.setBuildMode(mode);
exe.addCSourceFile("main.c", &.{});
exe.linkLibC();
@@ -29,7 +30,7 @@ pub fn build(b: *Builder) void {
{
const exe = b.addExecutable("no_pagezero", null);
- exe.setTarget(.{ .os_tag = .macos });
+ exe.setTarget(target);
exe.setBuildMode(mode);
exe.addCSourceFile("main.c", &.{});
exe.linkLibC();
diff --git a/test/link/macho/search_strategy/build.zig b/test/link/macho/search_strategy/build.zig
index 39a82bc6a7..e556b5bb23 100644
--- a/test/link/macho/search_strategy/build.zig
+++ b/test/link/macho/search_strategy/build.zig
@@ -1,17 +1,17 @@
const std = @import("std");
const Builder = std.build.Builder;
const LibExeObjectStep = std.build.LibExeObjStep;
-const target: std.zig.CrossTarget = .{ .os_tag = .macos };
pub fn build(b: *Builder) void {
const mode = b.standardReleaseOptions();
+ const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
{
// -search_dylibs_first
- const exe = createScenario(b, mode);
+ const exe = createScenario(b, mode, target);
exe.search_strategy = .dylibs_first;
const check = exe.checkObject(.macho);
@@ -26,7 +26,7 @@ pub fn build(b: *Builder) void {
{
// -search_paths_first
- const exe = createScenario(b, mode);
+ const exe = createScenario(b, mode, target);
exe.search_strategy = .paths_first;
const run = std.build.EmulatableRunStep.create(b, "run", exe);
@@ -36,7 +36,7 @@ pub fn build(b: *Builder) void {
}
}
-fn createScenario(b: *Builder, mode: std.builtin.Mode) *LibExeObjectStep {
+fn createScenario(b: *Builder, mode: std.builtin.Mode, target: std.zig.CrossTarget) *LibExeObjectStep {
const static = b.addStaticLibrary("a", null);
static.setTarget(target);
static.setBuildMode(mode);
diff --git a/test/link/macho/stack_size/build.zig b/test/link/macho/stack_size/build.zig
index 3abf48df7a..91c44baf52 100644
--- a/test/link/macho/stack_size/build.zig
+++ b/test/link/macho/stack_size/build.zig
@@ -3,12 +3,13 @@ const Builder = std.build.Builder;
pub fn build(b: *Builder) void {
const mode = b.standardReleaseOptions();
+ const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
const exe = b.addExecutable("main", null);
- exe.setTarget(.{ .os_tag = .macos });
+ exe.setTarget(target);
exe.setBuildMode(mode);
exe.addCSourceFile("main.c", &.{});
exe.linkLibC();
diff --git a/test/link/tls/a.c b/test/link/macho/tls/a.c
similarity index 100%
rename from test/link/tls/a.c
rename to test/link/macho/tls/a.c
diff --git a/test/link/tls/build.zig b/test/link/macho/tls/build.zig
similarity index 71%
rename from test/link/tls/build.zig
rename to test/link/macho/tls/build.zig
index ebf15ca439..031a05cedf 100644
--- a/test/link/tls/build.zig
+++ b/test/link/macho/tls/build.zig
@@ -1,15 +1,19 @@
-const Builder = @import("std").build.Builder;
+const std = @import("std");
+const Builder = std.build.Builder;
pub fn build(b: *Builder) void {
const mode = b.standardReleaseOptions();
+ const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const lib = b.addSharedLibrary("a", null, b.version(1, 0, 0));
lib.setBuildMode(mode);
+ lib.setTarget(target);
lib.addCSourceFile("a.c", &.{});
lib.linkLibC();
const test_exe = b.addTest("main.zig");
test_exe.setBuildMode(mode);
+ test_exe.setTarget(target);
test_exe.linkLibrary(lib);
test_exe.linkLibC();
diff --git a/test/link/tls/main.zig b/test/link/macho/tls/main.zig
similarity index 100%
rename from test/link/tls/main.zig
rename to test/link/macho/tls/main.zig
diff --git a/test/link/wasm/archive/build.zig b/test/link/wasm/archive/build.zig
new file mode 100644
index 0000000000..95ce444659
--- /dev/null
+++ b/test/link/wasm/archive/build.zig
@@ -0,0 +1,27 @@
+const std = @import("std");
+const Builder = std.build.Builder;
+
+pub fn build(b: *Builder) void {
+ const mode = b.standardReleaseOptions();
+
+ const test_step = b.step("test", "Test");
+ test_step.dependOn(b.getInstallStep());
+
+ // The code in question will pull in compiler-rt,
+ // and therefore link with its archive file.
+ const lib = b.addSharedLibrary("main", "main.zig", .unversioned);
+ lib.setBuildMode(mode);
+ lib.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ lib.use_llvm = false;
+ lib.use_stage1 = false;
+ lib.use_lld = false;
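+ // use_llvm/use_lld disabled: the self-hosted wasm backend and linker are
+ // what resolve symbols out of the compiler-rt archive here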
+
+ const check = lib.checkObject(.wasm);
+ check.checkStart("Section import");
+ check.checkNext("entries 1"); // __truncsfhf2 should have been resolved, so only 1 import (compiler-rt's memcpy).
+
+ check.checkStart("Section custom");
+ check.checkNext("name __truncsfhf2"); // Ensure it was imported and resolved
+
+ test_step.dependOn(&check.step);
+}
diff --git a/test/link/wasm/archive/main.zig b/test/link/wasm/archive/main.zig
new file mode 100644
index 0000000000..29be3af0ac
--- /dev/null
+++ b/test/link/wasm/archive/main.zig
@@ -0,0 +1,6 @@
+export fn foo() void {
+ var a: f16 = 2.2;
+ // this will pull in compiler-rt
+ var b = @trunc(a);
+ _ = b;
+}
diff --git a/test/stack_traces.zig b/test/stack_traces.zig
index c40413f936..3a8682b5a5 100644
--- a/test/stack_traces.zig
+++ b/test/stack_traces.zig
@@ -21,7 +21,8 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
},
.ReleaseSafe = .{
.exclude_os = .{
- .windows, // segfault
+ .windows, // TODO
+ .linux, // defeated by aggressive inlining
},
.expect =
\\error: TheSkyIsFalling
@@ -70,7 +71,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
},
.ReleaseSafe = .{
.exclude_os = .{
- .windows, // segfault
+ .windows, // TODO
},
.expect =
\\error: TheSkyIsFalling
@@ -136,7 +137,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
},
.ReleaseSafe = .{
.exclude_os = .{
- .windows, // segfault
+ .windows, // TODO
},
.expect =
\\error: TheSkyIsFalling
@@ -172,7 +173,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
cases.addCase(.{
.exclude_os = .{
.openbsd, // integer overflow
- .windows,
+ .windows, // TODO intermittent failures
},
.name = "dumpCurrentStackTrace",
.source =
diff --git a/test/stage2/cbe.zig b/test/stage2/cbe.zig
index 644cba74c1..321de1e3f6 100644
--- a/test/stage2/cbe.zig
+++ b/test/stage2/cbe.zig
@@ -704,15 +704,6 @@ pub fn addCases(ctx: *TestContext) !void {
":5:9: error: '_' is used to mark an enum as non-exhaustive and cannot be assigned a value",
});
- case.addError(
- \\const E1 = enum {};
- \\export fn foo() void {
- \\ _ = E1.a;
- \\}
- , &.{
- ":1:12: error: enum declarations must have at least one tag",
- });
-
case.addError(
\\const E1 = enum { a, b, _ };
\\export fn foo() void {
diff --git a/test/standalone.zig b/test/standalone.zig
index 76e46a1b62..bfd683ec4c 100644
--- a/test/standalone.zig
+++ b/test/standalone.zig
@@ -9,6 +9,7 @@ pub fn addCases(cases: *tests.StandaloneContext) void {
if (builtin.zig_backend == .stage1) { // https://github.com/ziglang/zig/issues/6025
cases.add("test/standalone/issue_9693/main.zig");
}
+ cases.add("test/standalone/issue_12471/main.zig");
cases.add("test/standalone/guess_number/main.zig");
cases.add("test/standalone/main_return_error/error_u8.zig");
cases.add("test/standalone/main_return_error/error_u8_non_zero.zig");
@@ -34,13 +35,16 @@ pub fn addCases(cases: *tests.StandaloneContext) void {
if (builtin.zig_backend == .stage1) { // https://github.com/ziglang/zig/issues/12194
cases.addBuildFile("test/standalone/issue_9812/build.zig", .{});
}
- cases.addBuildFile("test/standalone/issue_11595/build.zig", .{});
+ if (builtin.os.tag != .windows) {
+ // https://github.com/ziglang/zig/issues/12419
+ cases.addBuildFile("test/standalone/issue_11595/build.zig", .{});
+ }
if (builtin.os.tag != .wasi) {
cases.addBuildFile("test/standalone/load_dynamic_library/build.zig", .{});
}
// C ABI compatibility issue: https://github.com/ziglang/zig/issues/1481
if (builtin.cpu.arch == .x86_64) {
- if (builtin.zig_backend == .stage1) { // https://github.com/ziglang/zig/issues/12222
+ if (builtin.zig_backend == .stage1 or builtin.zig_backend == .stage2_llvm) { // https://github.com/ziglang/zig/issues/12222
cases.addBuildFile("test/c_abi/build.zig", .{});
}
}
diff --git a/test/standalone/issue_12471/main.zig b/test/standalone/issue_12471/main.zig
new file mode 100644
index 0000000000..08be1fd471
--- /dev/null
+++ b/test/standalone/issue_12471/main.zig
@@ -0,0 +1,12 @@
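+// FOO expands to itself and BAR expands to FOO (BAZ/QUX mirror the pattern);
+// these self-referential defines are regression coverage for issue 12471.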
+const c = @cImport({
+ @cDefine("FOO", "FOO");
+ @cDefine("BAR", "FOO");
+
+ @cDefine("BAZ", "QUX");
+ @cDefine("QUX", "QUX");
+});
+
+pub fn main() u8 {
+ _ = c;
+ return 0;
+}
diff --git a/test/tests.zig b/test/tests.zig
index dcc891f878..a329233199 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -605,7 +605,6 @@ pub fn addPkgTests(
skip_libc: bool,
skip_stage1: bool,
skip_stage2: bool,
- is_stage1: bool,
) *build.Step {
const step = b.step(b.fmt("test-{s}", .{name}), desc);
@@ -633,14 +632,22 @@ pub fn addPkgTests(
if (test_target.backend) |backend| switch (backend) {
.stage1 => if (skip_stage1) continue,
+ .stage2_llvm => {},
else => if (skip_stage2) continue,
- } else if (is_stage1 and skip_stage1) continue;
+ };
const want_this_mode = for (modes) |m| {
if (m == test_target.mode) break true;
} else false;
if (!want_this_mode) continue;
+ if (test_target.backend) |backend| {
+ if (backend == .stage2_c and builtin.os.tag == .windows) {
+ // https://github.com/ziglang/zig/issues/12415
+ continue;
+ }
+ }
+
const libc_prefix = if (test_target.target.getOs().requiresLibC())
""
else if (test_target.link_libc)
@@ -917,7 +924,7 @@ pub const StackTracesContext = struct {
pos = marks[i] + delim.len;
}
// locate source basename
- pos = mem.lastIndexOfScalar(u8, line[0..marks[0]], fs.path.sep) orelse {
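+ // Match either path separator, since Windows paths may use '\' or '/'.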
+ pos = mem.lastIndexOfAny(u8, line[0..marks[0]], "\\/") orelse {
// unexpected pattern: emit raw line and continue
try buf.appendSlice(line);
try buf.appendSlice("\n");
@@ -929,9 +936,9 @@ pub const StackTracesContext = struct {
try buf.appendSlice(line[pos + 1 .. marks[2] + delims[2].len]);
try buf.appendSlice(" [address]");
if (self.mode == .Debug) {
- if (mem.lastIndexOfScalar(u8, line[marks[4]..marks[5]], '.')) |idot| {
- // On certain platforms (windows) or possibly depending on how we choose to link main
- // the object file extension may be present so we simply strip any extension.
+ // On certain platforms (Windows), or depending on how main is linked,
+ // an object-file extension may be present, so we simply strip any extension.
+ if (mem.indexOfScalar(u8, line[marks[4]..marks[5]], '.')) |idot| {
try buf.appendSlice(line[marks[3] .. marks[4] + idot]);
try buf.appendSlice(line[marks[5]..]);
} else {
diff --git a/test/translate_c.zig b/test/translate_c.zig
index 5a640c5b4b..637d491f49 100644
--- a/test/translate_c.zig
+++ b/test/translate_c.zig
@@ -1485,7 +1485,19 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
, &[_][]const u8{
\\pub export fn ptrcast() [*c]f32 {
\\ var a: [*c]c_int = undefined;
- \\ return @ptrCast([*c]f32, @alignCast(@import("std").meta.alignment(f32), a));
+ \\ return @ptrCast([*c]f32, @alignCast(@import("std").meta.alignment([*c]f32), a));
+ \\}
+ });
+
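+ // std.meta.alignment of a pointer type yields its pointee's alignment, which is
+ // what @alignCast needs; passing the base scalar type was wrong for multi-level pointers.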
+ cases.add("casting pointer to pointer",
+ \\float **ptrptrcast() {
+ \\ int **a;
+ \\ return (float **)a;
+ \\}
+ , &[_][]const u8{
+ \\pub export fn ptrptrcast() [*c][*c]f32 {
+ \\ var a: [*c][*c]c_int = undefined;
+ \\ return @ptrCast([*c][*c]f32, @alignCast(@import("std").meta.alignment([*c][*c]f32), a));
\\}
});
@@ -1509,23 +1521,23 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\pub export fn test_ptr_cast() void {
\\ var p: ?*anyopaque = undefined;
\\ {
- \\ var to_char: [*c]u8 = @ptrCast([*c]u8, @alignCast(@import("std").meta.alignment(u8), p));
+ \\ var to_char: [*c]u8 = @ptrCast([*c]u8, @alignCast(@import("std").meta.alignment([*c]u8), p));
\\ _ = to_char;
- \\ var to_short: [*c]c_short = @ptrCast([*c]c_short, @alignCast(@import("std").meta.alignment(c_short), p));
+ \\ var to_short: [*c]c_short = @ptrCast([*c]c_short, @alignCast(@import("std").meta.alignment([*c]c_short), p));
\\ _ = to_short;
- \\ var to_int: [*c]c_int = @ptrCast([*c]c_int, @alignCast(@import("std").meta.alignment(c_int), p));
+ \\ var to_int: [*c]c_int = @ptrCast([*c]c_int, @alignCast(@import("std").meta.alignment([*c]c_int), p));
\\ _ = to_int;
- \\ var to_longlong: [*c]c_longlong = @ptrCast([*c]c_longlong, @alignCast(@import("std").meta.alignment(c_longlong), p));
+ \\ var to_longlong: [*c]c_longlong = @ptrCast([*c]c_longlong, @alignCast(@import("std").meta.alignment([*c]c_longlong), p));
\\ _ = to_longlong;
\\ }
\\ {
- \\ var to_char: [*c]u8 = @ptrCast([*c]u8, @alignCast(@import("std").meta.alignment(u8), p));
+ \\ var to_char: [*c]u8 = @ptrCast([*c]u8, @alignCast(@import("std").meta.alignment([*c]u8), p));
\\ _ = to_char;
- \\ var to_short: [*c]c_short = @ptrCast([*c]c_short, @alignCast(@import("std").meta.alignment(c_short), p));
+ \\ var to_short: [*c]c_short = @ptrCast([*c]c_short, @alignCast(@import("std").meta.alignment([*c]c_short), p));
\\ _ = to_short;
- \\ var to_int: [*c]c_int = @ptrCast([*c]c_int, @alignCast(@import("std").meta.alignment(c_int), p));
+ \\ var to_int: [*c]c_int = @ptrCast([*c]c_int, @alignCast(@import("std").meta.alignment([*c]c_int), p));
\\ _ = to_int;
- \\ var to_longlong: [*c]c_longlong = @ptrCast([*c]c_longlong, @alignCast(@import("std").meta.alignment(c_longlong), p));
+ \\ var to_longlong: [*c]c_longlong = @ptrCast([*c]c_longlong, @alignCast(@import("std").meta.alignment([*c]c_longlong), p));
\\ _ = to_longlong;
\\ }
\\}
@@ -3830,4 +3842,16 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
, &[_][]const u8{
\\pub const FOO = "";
});
+
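+ // In C, a leading zero makes an integer literal octal; a lone 0 stays decimal.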
+ cases.add("leading zeroes",
+ \\#define O_RDONLY 00
+ \\#define HELLO 000
+ \\#define ZERO 0
+ \\#define WORLD 00000123
+ , &[_][]const u8{
+ \\pub const O_RDONLY = @as(c_int, 0o0);
+ \\pub const HELLO = @as(c_int, 0o00);
+ \\pub const ZERO = @as(c_int, 0);
+ \\pub const WORLD = @as(c_int, 0o0000123);
+ });
}
diff --git a/tools/gen_spirv_spec.zig b/tools/gen_spirv_spec.zig
index 01c36ad72d..11ccce35a2 100644
--- a/tools/gen_spirv_spec.zig
+++ b/tools/gen_spirv_spec.zig
@@ -299,11 +299,11 @@ fn renderBitEnum(
for (enumerants) |enumerant, i| {
if (enumerant.value != .bitflag) return error.InvalidRegistry;
const value = try parseHexInt(enumerant.value.bitflag);
- if (@popCount(u32, value) == 0) {
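+ // @popCount no longer takes an explicit type argument; it is inferred from the operand.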
+ if (@popCount(value) == 0) {
continue; // Skip 'none' items
}
- std.debug.assert(@popCount(u32, value) == 1);
+ std.debug.assert(@popCount(value) == 1);
var bitpos = std.math.log2_int(u32, value);
if (flags_by_bitpos[bitpos]) |*existing| {
diff --git a/tools/gen_stubs.zig b/tools/gen_stubs.zig
index a153d0f2eb..b7c1ae1647 100644
--- a/tools/gen_stubs.zig
+++ b/tools/gen_stubs.zig
@@ -389,7 +389,7 @@ fn parseElf(parse: Parse, comptime is_64: bool, comptime endian: builtin.Endian)
const S = struct {
fn endianSwap(x: anytype) @TypeOf(x) {
if (endian != native_endian) {
- return @byteSwap(@TypeOf(x), x);
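+ // @byteSwap no longer takes an explicit type argument; it is inferred from the operand.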
+ return @byteSwap(x);
} else {
return x;
}
diff --git a/tools/update_clang_options.zig b/tools/update_clang_options.zig
index 32bb436ca3..92e0757ac7 100644
--- a/tools/update_clang_options.zig
+++ b/tools/update_clang_options.zig
@@ -352,6 +352,26 @@ const known_options = [_]KnownOpt{
.name = "fno-stack-check",
.ident = "no_stack_check",
},
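+ // All clang stack-protector variants (including -strong and -all) are collapsed
+ // into the single stack_protector ident; the strength levels are not modeled here.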
+ .{
+ .name = "stack-protector",
+ .ident = "stack_protector",
+ },
+ .{
+ .name = "fstack-protector",
+ .ident = "stack_protector",
+ },
+ .{
+ .name = "fno-stack-protector",
+ .ident = "no_stack_protector",
+ },
+ .{
+ .name = "fstack-protector-strong",
+ .ident = "stack_protector",
+ },
+ .{
+ .name = "fstack-protector-all",
+ .ident = "stack_protector",
+ },
.{
.name = "MD",
.ident = "dep_file",
@@ -386,11 +406,15 @@ const known_options = [_]KnownOpt{
},
.{
.name = "MM",
- .ident = "dep_file_mm",
+ .ident = "dep_file_to_stdout",
+ },
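+ // -M behaves like -MM (make-style dependencies to stdout) but also lists system headers.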
+ .{
+ .name = "M",
+ .ident = "dep_file_to_stdout",
},
.{
.name = "user-dependencies",
- .ident = "dep_file_mm",
+ .ident = "dep_file_to_stdout",
},
.{
.name = "MMD",
@@ -648,9 +672,9 @@ pub fn main() anyerror!void {
\\ .name = "{s}",
\\ .syntax = {s},
\\ .zig_equivalent = .{s},
- \\ .pd1 = {any},
- \\ .pd2 = {any},
- \\ .psl = {any},
+ \\ .pd1 = {},
+ \\ .pd2 = {},
+ \\ .psl = {},
\\}},
\\
, .{ name, final_syntax, ident, pd1, pd2, pslash });
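+ // {} (default formatting) prints bools as true/false, making {any} redundant here.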
@@ -678,9 +702,9 @@ pub fn main() anyerror!void {
\\ .name = "{s}",
\\ .syntax = {s},
\\ .zig_equivalent = .other,
- \\ .pd1 = {any},
- \\ .pd2 = {any},
- \\ .psl = {any},
+ \\ .pd1 = {},
+ \\ .pd2 = {},
+ \\ .psl = {},
\\}},
\\
, .{ name, syntax, pd1, pd2, pslash });