stage1: memory/report overhaul
- split util_base.hpp from util.hpp
- new namespaces: `mem` and `heap`
- new `mem::Allocator` interface
- new `heap::CAllocator` impl with global `heap::c_allocator`
- new `heap::ArenaAllocator` impl
- new `mem::TypeInfo` extracts names without RTTI
- name extraction is enabled w/ ZIG_ENABLE_MEM_PROFILE=1
- new `mem::List` takes explicit `Allocator&` parameter
- new `mem::HashMap` takes explicit `Allocator&` parameter
- add Codegen.pass1_arena and use for all `ZigValue` allocs
- deinit Codegen.pass1_arena early in `zig_llvm_emit_output()`
parent 1cdefeb10b
commit edb210905d
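The new util_base.hpp/mem.hpp/heap.hpp headers are not part of the excerpt below, so what follows is only a rough sketch of the allocator surface that the call sites in this diff imply (`heap::c_allocator.create<T>()`, `allocate<T>(n)`, `allocate_nonzero<T>(n)`, `reallocate`, `reallocate_nonzero`, `deallocate`, `destroy`, plus a `heap::ArenaAllocator` stored on `CodeGen` as `pass1_arena`). Every name and signature in the sketch is inferred from those call sites, not taken from the actual headers.

// Editor's sketch, NOT the real mem.hpp/heap.hpp: a minimal mem::Allocator shape
// consistent with the call sites in this diff. Stage1 treats these structs as
// plain data, so destroy() here just frees without running destructors.
#include <cstddef>
#include <cstdlib>
#include <cstring>

namespace mem {

struct Allocator {
    virtual ~Allocator() {}

    template <typename T> T *create() { return allocate<T>(1); }
    template <typename T> T *allocate(size_t n) {               // zero-initialized
        return static_cast<T *>(internal_allocate(n * sizeof(T), true));
    }
    template <typename T> T *allocate_nonzero(size_t n) {       // uninitialized
        return static_cast<T *>(internal_allocate(n * sizeof(T), false));
    }
    template <typename T> T *reallocate_nonzero(T *old_ptr, size_t old_n, size_t new_n) {
        T *new_ptr = allocate_nonzero<T>(new_n);
        if (old_ptr != nullptr) {
            memcpy(new_ptr, old_ptr, old_n * sizeof(T));
            deallocate(old_ptr, old_n);
        }
        return new_ptr;
    }
    // reallocate() would look the same, but with the new memory zero-initialized.
    template <typename T> void deallocate(T *ptr, size_t n) {
        internal_deallocate(ptr, n * sizeof(T));
    }
    template <typename T> void destroy(T *ptr) { deallocate(ptr, 1); }

protected:
    virtual void *internal_allocate(size_t size, bool zero) = 0;
    virtual void internal_deallocate(void *ptr, size_t size) = 0;
};

} // namespace mem

namespace heap {

struct CAllocator final : mem::Allocator {
protected:
    void *internal_allocate(size_t size, bool zero) override {
        return zero ? calloc(1, size) : malloc(size);
    }
    void internal_deallocate(void *ptr, size_t) override { free(ptr); }
};

extern CAllocator c_allocator; // the single global instance used throughout the diff

} // namespace heap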
@@ -450,7 +450,7 @@ set(ZIG_MAIN_SRC "${CMAKE_SOURCE_DIR}/src/main.cpp")
set(ZIG0_SHIM_SRC "${CMAKE_SOURCE_DIR}/src/userland.cpp")

if(ZIG_ENABLE_MEM_PROFILE)
set(ZIG_SOURCES_MEM_PROFILE "${CMAKE_SOURCE_DIR}/src/memory_profiling.cpp")
set(ZIG_SOURCES_MEM_PROFILE "${CMAKE_SOURCE_DIR}/src/mem_profile.cpp")
endif()

set(ZIG_SOURCES
@@ -466,10 +466,12 @@ set(ZIG_SOURCES
"${CMAKE_SOURCE_DIR}/src/errmsg.cpp"
"${CMAKE_SOURCE_DIR}/src/error.cpp"
"${CMAKE_SOURCE_DIR}/src/glibc.cpp"
"${CMAKE_SOURCE_DIR}/src/heap.cpp"
"${CMAKE_SOURCE_DIR}/src/ir.cpp"
"${CMAKE_SOURCE_DIR}/src/ir_print.cpp"
"${CMAKE_SOURCE_DIR}/src/libc_installation.cpp"
"${CMAKE_SOURCE_DIR}/src/link.cpp"
"${CMAKE_SOURCE_DIR}/src/mem.cpp"
"${CMAKE_SOURCE_DIR}/src/os.cpp"
"${CMAKE_SOURCE_DIR}/src/parser.cpp"
"${CMAKE_SOURCE_DIR}/src/range_set.cpp"
@@ -2000,6 +2000,9 @@ struct CFile {

// When adding fields, check if they should be added to the hash computation in build_with_cache
struct CodeGen {
// arena allocator destroyed just prior to codegen emit
heap::ArenaAllocator *pass1_arena;

//////////////////////////// Runtime State
LLVMModuleRef module;
ZigList<ErrorMsg*> errors;
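The important property of heap::ArenaAllocator for this change is that ZigValues allocated through g->pass1_arena never need individual frees: per the commit message, the whole arena is dropped at once, early in zig_llvm_emit_output(), when pass-1 values are no longer needed. The arena's real implementation is not in this excerpt; the following standalone toy only shows the general bump-allocation idea it presumably relies on.

// Editor's toy arena, an assumption about the general technique rather than the
// real heap::ArenaAllocator: bump-pointer allocation out of large chunks, no
// per-object frees, and one-shot teardown of everything at the end.
#include <cstddef>
#include <cstdlib>
#include <vector>

struct ToyArena {
    std::vector<void *> chunks;
    char *ptr = nullptr;
    size_t remaining = 0;

    void *allocate(size_t size) {
        size = (size + 15) & ~size_t(15);                // keep 16-byte alignment
        if (size > remaining) {
            size_t chunk = size > 65536 ? size : 65536;
            ptr = static_cast<char *>(calloc(1, chunk)); // zeroed, like allocate<T>()
            remaining = chunk;
            chunks.push_back(ptr);
        }
        void *result = ptr;
        ptr += size;
        remaining -= size;
        return result;
    }
    template <typename T> T *create() { return static_cast<T *>(allocate(sizeof(T))); }

    ~ToyArena() {                                        // release every allocation at once
        for (void *chunk : chunks) free(chunk);
    }
};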
@@ -2280,7 +2283,6 @@ struct ZigVar {
Scope *parent_scope;
Scope *child_scope;
LLVMValueRef param_value_ref;
IrExecutableSrc *owner_exec;

Buf *section_name;

189 src/analyze.cpp
@@ -80,7 +80,7 @@ ErrorMsg *add_error_note(CodeGen *g, ErrorMsg *parent_msg, const AstNode *node,
}

ZigType *new_type_table_entry(ZigTypeId id) {
ZigType *entry = allocate<ZigType>(1);
ZigType *entry = heap::c_allocator.create<ZigType>();
entry->id = id;
return entry;
}
@@ -140,7 +140,7 @@ void init_scope(CodeGen *g, Scope *dest, ScopeId id, AstNode *source_node, Scope
static ScopeDecls *create_decls_scope(CodeGen *g, AstNode *node, Scope *parent, ZigType *container_type,
ZigType *import, Buf *bare_name)
{
ScopeDecls *scope = allocate<ScopeDecls>(1);
ScopeDecls *scope = heap::c_allocator.create<ScopeDecls>();
init_scope(g, &scope->base, ScopeIdDecls, node, parent);
scope->decl_table.init(4);
scope->container_type = container_type;
@@ -151,7 +151,7 @@ static ScopeDecls *create_decls_scope(CodeGen *g, AstNode *node, Scope *parent,

ScopeBlock *create_block_scope(CodeGen *g, AstNode *node, Scope *parent) {
assert(node->type == NodeTypeBlock);
ScopeBlock *scope = allocate<ScopeBlock>(1);
ScopeBlock *scope = heap::c_allocator.create<ScopeBlock>();
init_scope(g, &scope->base, ScopeIdBlock, node, parent);
scope->name = node->data.block.name;
return scope;
@@ -159,20 +159,20 @@ ScopeBlock *create_block_scope(CodeGen *g, AstNode *node, Scope *parent) {

ScopeDefer *create_defer_scope(CodeGen *g, AstNode *node, Scope *parent) {
assert(node->type == NodeTypeDefer);
ScopeDefer *scope = allocate<ScopeDefer>(1);
ScopeDefer *scope = heap::c_allocator.create<ScopeDefer>();
init_scope(g, &scope->base, ScopeIdDefer, node, parent);
return scope;
}

ScopeDeferExpr *create_defer_expr_scope(CodeGen *g, AstNode *node, Scope *parent) {
assert(node->type == NodeTypeDefer);
ScopeDeferExpr *scope = allocate<ScopeDeferExpr>(1);
ScopeDeferExpr *scope = heap::c_allocator.create<ScopeDeferExpr>();
init_scope(g, &scope->base, ScopeIdDeferExpr, node, parent);
return scope;
}

Scope *create_var_scope(CodeGen *g, AstNode *node, Scope *parent, ZigVar *var) {
ScopeVarDecl *scope = allocate<ScopeVarDecl>(1);
ScopeVarDecl *scope = heap::c_allocator.create<ScopeVarDecl>();
init_scope(g, &scope->base, ScopeIdVarDecl, node, parent);
scope->var = var;
return &scope->base;
@@ -180,14 +180,14 @@ Scope *create_var_scope(CodeGen *g, AstNode *node, Scope *parent, ZigVar *var) {

ScopeCImport *create_cimport_scope(CodeGen *g, AstNode *node, Scope *parent) {
assert(node->type == NodeTypeFnCallExpr);
ScopeCImport *scope = allocate<ScopeCImport>(1);
ScopeCImport *scope = heap::c_allocator.create<ScopeCImport>();
init_scope(g, &scope->base, ScopeIdCImport, node, parent);
buf_resize(&scope->buf, 0);
return scope;
}

ScopeLoop *create_loop_scope(CodeGen *g, AstNode *node, Scope *parent) {
ScopeLoop *scope = allocate<ScopeLoop>(1);
ScopeLoop *scope = heap::c_allocator.create<ScopeLoop>();
init_scope(g, &scope->base, ScopeIdLoop, node, parent);
if (node->type == NodeTypeWhileExpr) {
scope->name = node->data.while_expr.name;
@@ -200,7 +200,7 @@ ScopeLoop *create_loop_scope(CodeGen *g, AstNode *node, Scope *parent) {
}

Scope *create_runtime_scope(CodeGen *g, AstNode *node, Scope *parent, IrInstSrc *is_comptime) {
ScopeRuntime *scope = allocate<ScopeRuntime>(1);
ScopeRuntime *scope = heap::c_allocator.create<ScopeRuntime>();
scope->is_comptime = is_comptime;
init_scope(g, &scope->base, ScopeIdRuntime, node, parent);
return &scope->base;
@ -208,37 +208,37 @@ Scope *create_runtime_scope(CodeGen *g, AstNode *node, Scope *parent, IrInstSrc
|
||||
|
||||
ScopeSuspend *create_suspend_scope(CodeGen *g, AstNode *node, Scope *parent) {
|
||||
assert(node->type == NodeTypeSuspend);
|
||||
ScopeSuspend *scope = allocate<ScopeSuspend>(1);
|
||||
ScopeSuspend *scope = heap::c_allocator.create<ScopeSuspend>();
|
||||
init_scope(g, &scope->base, ScopeIdSuspend, node, parent);
|
||||
return scope;
|
||||
}
|
||||
|
||||
ScopeFnDef *create_fndef_scope(CodeGen *g, AstNode *node, Scope *parent, ZigFn *fn_entry) {
|
||||
ScopeFnDef *scope = allocate<ScopeFnDef>(1);
|
||||
ScopeFnDef *scope = heap::c_allocator.create<ScopeFnDef>();
|
||||
init_scope(g, &scope->base, ScopeIdFnDef, node, parent);
|
||||
scope->fn_entry = fn_entry;
|
||||
return scope;
|
||||
}
|
||||
|
||||
Scope *create_comptime_scope(CodeGen *g, AstNode *node, Scope *parent) {
|
||||
ScopeCompTime *scope = allocate<ScopeCompTime>(1);
|
||||
ScopeCompTime *scope = heap::c_allocator.create<ScopeCompTime>();
|
||||
init_scope(g, &scope->base, ScopeIdCompTime, node, parent);
|
||||
return &scope->base;
|
||||
}
|
||||
|
||||
Scope *create_typeof_scope(CodeGen *g, AstNode *node, Scope *parent) {
|
||||
ScopeTypeOf *scope = allocate<ScopeTypeOf>(1);
|
||||
ScopeTypeOf *scope = heap::c_allocator.create<ScopeTypeOf>();
|
||||
init_scope(g, &scope->base, ScopeIdTypeOf, node, parent);
|
||||
return &scope->base;
|
||||
}
|
||||
|
||||
ScopeExpr *create_expr_scope(CodeGen *g, AstNode *node, Scope *parent) {
|
||||
ScopeExpr *scope = allocate<ScopeExpr>(1);
|
||||
ScopeExpr *scope = heap::c_allocator.create<ScopeExpr>();
|
||||
init_scope(g, &scope->base, ScopeIdExpr, node, parent);
|
||||
ScopeExpr *parent_expr = find_expr_scope(parent);
|
||||
if (parent_expr != nullptr) {
|
||||
size_t new_len = parent_expr->children_len + 1;
|
||||
parent_expr->children_ptr = reallocate_nonzero<ScopeExpr *>(
|
||||
parent_expr->children_ptr = heap::c_allocator.reallocate_nonzero<ScopeExpr *>(
|
||||
parent_expr->children_ptr, parent_expr->children_len, new_len);
|
||||
parent_expr->children_ptr[parent_expr->children_len] = scope;
|
||||
parent_expr->children_len = new_len;
|
||||
@ -1104,8 +1104,8 @@ ZigValue *analyze_const_value(CodeGen *g, Scope *scope, AstNode *node, ZigType *
|
||||
{
|
||||
Error err;
|
||||
|
||||
ZigValue *result = create_const_vals(1);
|
||||
ZigValue *result_ptr = create_const_vals(1);
|
||||
ZigValue *result = g->pass1_arena->create<ZigValue>();
|
||||
ZigValue *result_ptr = g->pass1_arena->create<ZigValue>();
|
||||
result->special = ConstValSpecialUndef;
|
||||
result->type = (type_entry == nullptr) ? g->builtin_types.entry_var : type_entry;
|
||||
result_ptr->special = ConstValSpecialStatic;
|
||||
@ -1122,7 +1122,6 @@ ZigValue *analyze_const_value(CodeGen *g, Scope *scope, AstNode *node, ZigType *
|
||||
{
|
||||
return g->invalid_inst_gen->value;
|
||||
}
|
||||
destroy(result_ptr, "ZigValue");
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -1507,7 +1506,7 @@ void init_fn_type_id(FnTypeId *fn_type_id, AstNode *proto_node, CallingConventio
|
||||
|
||||
fn_type_id->cc = cc;
|
||||
fn_type_id->param_count = fn_proto->params.length;
|
||||
fn_type_id->param_info = allocate<FnTypeParamInfo>(param_count_alloc);
|
||||
fn_type_id->param_info = heap::c_allocator.allocate<FnTypeParamInfo>(param_count_alloc);
|
||||
fn_type_id->next_param_index = 0;
|
||||
fn_type_id->is_var_args = fn_proto->is_var_args;
|
||||
}
|
||||
@ -2171,7 +2170,7 @@ static Error resolve_struct_type(CodeGen *g, ZigType *struct_type) {
|
||||
bool packed = (struct_type->data.structure.layout == ContainerLayoutPacked);
|
||||
struct_type->data.structure.resolve_loop_flag_other = true;
|
||||
|
||||
uint32_t *host_int_bytes = packed ? allocate<uint32_t>(struct_type->data.structure.gen_field_count) : nullptr;
|
||||
uint32_t *host_int_bytes = packed ? heap::c_allocator.allocate<uint32_t>(struct_type->data.structure.gen_field_count) : nullptr;
|
||||
|
||||
size_t packed_bits_offset = 0;
|
||||
size_t next_offset = 0;
|
||||
@ -2657,7 +2656,7 @@ static Error resolve_enum_zero_bits(CodeGen *g, ZigType *enum_type) {
|
||||
}
|
||||
|
||||
enum_type->data.enumeration.src_field_count = field_count;
|
||||
enum_type->data.enumeration.fields = allocate<TypeEnumField>(field_count);
|
||||
enum_type->data.enumeration.fields = heap::c_allocator.allocate<TypeEnumField>(field_count);
|
||||
enum_type->data.enumeration.fields_by_name.init(field_count);
|
||||
|
||||
HashMap<BigInt, AstNode *, bigint_hash, bigint_eql> occupied_tag_values = {};
|
||||
@ -3034,7 +3033,7 @@ static Error resolve_union_zero_bits(CodeGen *g, ZigType *union_type) {
|
||||
return ErrorSemanticAnalyzeFail;
|
||||
}
|
||||
union_type->data.unionation.src_field_count = field_count;
|
||||
union_type->data.unionation.fields = allocate<TypeUnionField>(field_count);
|
||||
union_type->data.unionation.fields = heap::c_allocator.allocate<TypeUnionField>(field_count);
|
||||
union_type->data.unionation.fields_by_name.init(field_count);
|
||||
|
||||
Scope *scope = &union_type->data.unionation.decls_scope->base;
|
||||
@ -3053,7 +3052,7 @@ static Error resolve_union_zero_bits(CodeGen *g, ZigType *union_type) {
|
||||
if (create_enum_type) {
|
||||
occupied_tag_values.init(field_count);
|
||||
|
||||
di_enumerators = allocate<ZigLLVMDIEnumerator*>(field_count);
|
||||
di_enumerators = heap::c_allocator.allocate<ZigLLVMDIEnumerator*>(field_count);
|
||||
|
||||
ZigType *tag_int_type;
|
||||
if (enum_type_node != nullptr) {
|
||||
@ -3086,7 +3085,7 @@ static Error resolve_union_zero_bits(CodeGen *g, ZigType *union_type) {
|
||||
tag_type->data.enumeration.decl_node = decl_node;
|
||||
tag_type->data.enumeration.layout = ContainerLayoutAuto;
|
||||
tag_type->data.enumeration.src_field_count = field_count;
|
||||
tag_type->data.enumeration.fields = allocate<TypeEnumField>(field_count);
|
||||
tag_type->data.enumeration.fields = heap::c_allocator.allocate<TypeEnumField>(field_count);
|
||||
tag_type->data.enumeration.fields_by_name.init(field_count);
|
||||
tag_type->data.enumeration.decls_scope = union_type->data.unionation.decls_scope;
|
||||
} else if (enum_type_node != nullptr) {
|
||||
@ -3106,7 +3105,7 @@ static Error resolve_union_zero_bits(CodeGen *g, ZigType *union_type) {
|
||||
return err;
|
||||
}
|
||||
tag_type = enum_type;
|
||||
covered_enum_fields = allocate<bool>(enum_type->data.enumeration.src_field_count);
|
||||
covered_enum_fields = heap::c_allocator.allocate<bool>(enum_type->data.enumeration.src_field_count);
|
||||
} else {
|
||||
tag_type = nullptr;
|
||||
}
|
||||
@ -3244,7 +3243,7 @@ static Error resolve_union_zero_bits(CodeGen *g, ZigType *union_type) {
|
||||
}
|
||||
covered_enum_fields[union_field->enum_field->decl_index] = true;
|
||||
} else {
|
||||
union_field->enum_field = allocate<TypeEnumField>(1);
|
||||
union_field->enum_field = heap::c_allocator.create<TypeEnumField>();
|
||||
union_field->enum_field->name = field_name;
|
||||
union_field->enum_field->decl_index = i;
|
||||
bigint_init_unsigned(&union_field->enum_field->value, i);
|
||||
@ -3366,8 +3365,8 @@ static void get_fully_qualified_decl_name(CodeGen *g, Buf *buf, Tld *tld, bool i
|
||||
}
|
||||
|
||||
ZigFn *create_fn_raw(CodeGen *g, FnInline inline_value) {
|
||||
ZigFn *fn_entry = allocate<ZigFn>(1, "ZigFn");
|
||||
fn_entry->ir_executable = allocate<IrExecutableSrc>(1, "IrExecutableSrc");
|
||||
ZigFn *fn_entry = heap::c_allocator.create<ZigFn>();
|
||||
fn_entry->ir_executable = heap::c_allocator.create<IrExecutableSrc>();
|
||||
|
||||
fn_entry->prealloc_backward_branch_quota = default_backward_branch_quota;
|
||||
|
||||
@ -3642,7 +3641,7 @@ static void preview_test_decl(CodeGen *g, AstNode *node, ScopeDecls *decls_scope
|
||||
return;
|
||||
}
|
||||
|
||||
TldFn *tld_fn = allocate<TldFn>(1);
|
||||
TldFn *tld_fn = heap::c_allocator.create<TldFn>();
|
||||
init_tld(&tld_fn->base, TldIdFn, test_name, VisibModPrivate, node, &decls_scope->base);
|
||||
g->resolve_queue.append(&tld_fn->base);
|
||||
}
|
||||
@ -3650,7 +3649,7 @@ static void preview_test_decl(CodeGen *g, AstNode *node, ScopeDecls *decls_scope
|
||||
static void preview_comptime_decl(CodeGen *g, AstNode *node, ScopeDecls *decls_scope) {
|
||||
assert(node->type == NodeTypeCompTime);
|
||||
|
||||
TldCompTime *tld_comptime = allocate<TldCompTime>(1);
|
||||
TldCompTime *tld_comptime = heap::c_allocator.create<TldCompTime>();
|
||||
init_tld(&tld_comptime->base, TldIdCompTime, nullptr, VisibModPrivate, node, &decls_scope->base);
|
||||
g->resolve_queue.append(&tld_comptime->base);
|
||||
}
|
||||
@ -3673,7 +3672,7 @@ void update_compile_var(CodeGen *g, Buf *name, ZigValue *value) {
|
||||
resolve_top_level_decl(g, tld, tld->source_node, false);
|
||||
assert(tld->id == TldIdVar && tld->resolution == TldResolutionOk);
|
||||
TldVar *tld_var = (TldVar *)tld;
|
||||
copy_const_val(tld_var->var->const_value, value);
|
||||
copy_const_val(g, tld_var->var->const_value, value);
|
||||
tld_var->var->var_type = value->type;
|
||||
tld_var->var->align_bytes = get_abi_alignment(g, value->type);
|
||||
}
|
||||
@ -3693,7 +3692,7 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) {
|
||||
{
|
||||
Buf *name = node->data.variable_declaration.symbol;
|
||||
VisibMod visib_mod = node->data.variable_declaration.visib_mod;
|
||||
TldVar *tld_var = allocate<TldVar>(1);
|
||||
TldVar *tld_var = heap::c_allocator.create<TldVar>();
|
||||
init_tld(&tld_var->base, TldIdVar, name, visib_mod, node, &decls_scope->base);
|
||||
tld_var->extern_lib_name = node->data.variable_declaration.lib_name;
|
||||
add_top_level_decl(g, decls_scope, &tld_var->base);
|
||||
@ -3709,7 +3708,7 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) {
|
||||
}
|
||||
|
||||
VisibMod visib_mod = node->data.fn_proto.visib_mod;
|
||||
TldFn *tld_fn = allocate<TldFn>(1);
|
||||
TldFn *tld_fn = heap::c_allocator.create<TldFn>();
|
||||
init_tld(&tld_fn->base, TldIdFn, fn_name, visib_mod, node, &decls_scope->base);
|
||||
tld_fn->extern_lib_name = node->data.fn_proto.lib_name;
|
||||
add_top_level_decl(g, decls_scope, &tld_fn->base);
|
||||
@ -3718,7 +3717,7 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) {
|
||||
}
|
||||
case NodeTypeUsingNamespace: {
|
||||
VisibMod visib_mod = node->data.using_namespace.visib_mod;
|
||||
TldUsingNamespace *tld_using_namespace = allocate<TldUsingNamespace>(1);
|
||||
TldUsingNamespace *tld_using_namespace = heap::c_allocator.create<TldUsingNamespace>();
|
||||
init_tld(&tld_using_namespace->base, TldIdUsingNamespace, nullptr, visib_mod, node, &decls_scope->base);
|
||||
add_top_level_decl(g, decls_scope, &tld_using_namespace->base);
|
||||
decls_scope->use_decls.append(tld_using_namespace);
|
||||
@ -3845,7 +3844,7 @@ ZigVar *add_variable(CodeGen *g, AstNode *source_node, Scope *parent_scope, Buf
|
||||
assert(const_value != nullptr);
|
||||
assert(var_type != nullptr);
|
||||
|
||||
ZigVar *variable_entry = allocate<ZigVar>(1);
|
||||
ZigVar *variable_entry = heap::c_allocator.create<ZigVar>();
|
||||
variable_entry->const_value = const_value;
|
||||
variable_entry->var_type = var_type;
|
||||
variable_entry->parent_scope = parent_scope;
|
||||
@ -3984,7 +3983,7 @@ static void resolve_decl_var(CodeGen *g, TldVar *tld_var, bool allow_lazy) {
|
||||
ZigType *type = explicit_type ? explicit_type : implicit_type;
|
||||
assert(type != nullptr); // should have been caught by the parser
|
||||
|
||||
ZigValue *init_val = (init_value != nullptr) ? init_value : create_const_runtime(type);
|
||||
ZigValue *init_val = (init_value != nullptr) ? init_value : create_const_runtime(g, type);
|
||||
|
||||
tld_var->var = add_variable(g, source_node, tld_var->base.parent_scope, var_decl->symbol,
|
||||
is_const, init_val, &tld_var->base, type);
|
||||
@ -4491,7 +4490,7 @@ static Error define_local_param_variables(CodeGen *g, ZigFn *fn_table_entry) {
|
||||
}
|
||||
|
||||
ZigVar *var = add_variable(g, param_decl_node, fn_table_entry->child_scope,
|
||||
param_name, true, create_const_runtime(param_type), nullptr, param_type);
|
||||
param_name, true, create_const_runtime(g, param_type), nullptr, param_type);
|
||||
var->src_arg_index = i;
|
||||
fn_table_entry->child_scope = var->child_scope;
|
||||
var->shadowable = var->shadowable || is_var_args;
|
||||
@ -4786,7 +4785,7 @@ static void analyze_fn_ir(CodeGen *g, ZigFn *fn, AstNode *return_type_node) {
|
||||
} else {
|
||||
return_err_set_type->data.error_set.err_count = inferred_err_set_type->data.error_set.err_count;
|
||||
if (inferred_err_set_type->data.error_set.err_count > 0) {
|
||||
return_err_set_type->data.error_set.errors = allocate<ErrorTableEntry *>(inferred_err_set_type->data.error_set.err_count);
|
||||
return_err_set_type->data.error_set.errors = heap::c_allocator.allocate<ErrorTableEntry *>(inferred_err_set_type->data.error_set.err_count);
|
||||
for (uint32_t i = 0; i < inferred_err_set_type->data.error_set.err_count; i += 1) {
|
||||
return_err_set_type->data.error_set.errors[i] = inferred_err_set_type->data.error_set.errors[i];
|
||||
}
|
||||
@ -4919,7 +4918,7 @@ ZigType *add_source_file(CodeGen *g, ZigPackage *package, Buf *resolved_path, Bu
|
||||
Buf *bare_name = buf_alloc();
|
||||
os_path_extname(src_basename, bare_name, nullptr);
|
||||
|
||||
RootStruct *root_struct = allocate<RootStruct>(1);
|
||||
RootStruct *root_struct = heap::c_allocator.create<RootStruct>();
|
||||
root_struct->package = package;
|
||||
root_struct->source_code = source_code;
|
||||
root_struct->line_offsets = tokenization.line_offsets;
|
||||
@ -4946,7 +4945,7 @@ ZigType *add_source_file(CodeGen *g, ZigPackage *package, Buf *resolved_path, Bu
|
||||
scan_decls(g, import_entry->data.structure.decls_scope, top_level_decl);
|
||||
}
|
||||
|
||||
TldContainer *tld_container = allocate<TldContainer>(1);
|
||||
TldContainer *tld_container = heap::c_allocator.create<TldContainer>();
|
||||
init_tld(&tld_container->base, TldIdContainer, namespace_name, VisibModPub, root_node, nullptr);
|
||||
tld_container->type_entry = import_entry;
|
||||
tld_container->decls_scope = import_entry->data.structure.decls_scope;
|
||||
@ -5694,14 +5693,14 @@ ZigValue *get_the_one_possible_value(CodeGen *g, ZigType *type_entry) {
|
||||
if (entry != nullptr) {
|
||||
return entry->value;
|
||||
}
|
||||
ZigValue *result = create_const_vals(1);
|
||||
ZigValue *result = g->pass1_arena->create<ZigValue>();
|
||||
result->type = type_entry;
|
||||
result->special = ConstValSpecialStatic;
|
||||
if (result->type->id == ZigTypeIdStruct) {
|
||||
// The fields array cannot be left unpopulated
|
||||
const ZigType *struct_type = result->type;
|
||||
const size_t field_count = struct_type->data.structure.src_field_count;
|
||||
result->data.x_struct.fields = alloc_const_vals_ptrs(field_count);
|
||||
result->data.x_struct.fields = alloc_const_vals_ptrs(g, field_count);
|
||||
for (size_t i = 0; i < field_count; i += 1) {
|
||||
TypeStructField *field = struct_type->data.structure.fields[i];
|
||||
ZigType *field_type = resolve_struct_field_type(g, field);
|
||||
@ -5786,7 +5785,7 @@ void init_const_str_lit(CodeGen *g, ZigValue *const_val, Buf *str) {
|
||||
}
|
||||
|
||||
// first we build the underlying array
|
||||
ZigValue *array_val = create_const_vals(1);
|
||||
ZigValue *array_val = g->pass1_arena->create<ZigValue>();
|
||||
array_val->special = ConstValSpecialStatic;
|
||||
array_val->type = get_array_type(g, g->builtin_types.entry_u8, buf_len(str), g->intern.for_zero_byte());
|
||||
array_val->data.x_array.special = ConstArraySpecialBuf;
|
||||
@ -5803,7 +5802,7 @@ void init_const_str_lit(CodeGen *g, ZigValue *const_val, Buf *str) {
|
||||
}
|
||||
|
||||
ZigValue *create_const_str_lit(CodeGen *g, Buf *str) {
|
||||
ZigValue *const_val = create_const_vals(1);
|
||||
ZigValue *const_val = g->pass1_arena->create<ZigValue>();
|
||||
init_const_str_lit(g, const_val, str);
|
||||
return const_val;
|
||||
}
|
||||
@ -5814,8 +5813,8 @@ void init_const_bigint(ZigValue *const_val, ZigType *type, const BigInt *bigint)
|
||||
bigint_init_bigint(&const_val->data.x_bigint, bigint);
|
||||
}
|
||||
|
||||
ZigValue *create_const_bigint(ZigType *type, const BigInt *bigint) {
|
||||
ZigValue *const_val = create_const_vals(1);
|
||||
ZigValue *create_const_bigint(CodeGen *g, ZigType *type, const BigInt *bigint) {
|
||||
ZigValue *const_val = g->pass1_arena->create<ZigValue>();
|
||||
init_const_bigint(const_val, type, bigint);
|
||||
return const_val;
|
||||
}
|
||||
@ -5828,8 +5827,8 @@ void init_const_unsigned_negative(ZigValue *const_val, ZigType *type, uint64_t x
|
||||
const_val->data.x_bigint.is_negative = negative;
|
||||
}
|
||||
|
||||
ZigValue *create_const_unsigned_negative(ZigType *type, uint64_t x, bool negative) {
|
||||
ZigValue *const_val = create_const_vals(1);
|
||||
ZigValue *create_const_unsigned_negative(CodeGen *g, ZigType *type, uint64_t x, bool negative) {
|
||||
ZigValue *const_val = g->pass1_arena->create<ZigValue>();
|
||||
init_const_unsigned_negative(const_val, type, x, negative);
|
||||
return const_val;
|
||||
}
|
||||
@ -5839,7 +5838,7 @@ void init_const_usize(CodeGen *g, ZigValue *const_val, uint64_t x) {
|
||||
}
|
||||
|
||||
ZigValue *create_const_usize(CodeGen *g, uint64_t x) {
|
||||
return create_const_unsigned_negative(g->builtin_types.entry_usize, x, false);
|
||||
return create_const_unsigned_negative(g, g->builtin_types.entry_usize, x, false);
|
||||
}
|
||||
|
||||
void init_const_signed(ZigValue *const_val, ZigType *type, int64_t x) {
|
||||
@ -5848,8 +5847,8 @@ void init_const_signed(ZigValue *const_val, ZigType *type, int64_t x) {
|
||||
bigint_init_signed(&const_val->data.x_bigint, x);
|
||||
}
|
||||
|
||||
ZigValue *create_const_signed(ZigType *type, int64_t x) {
|
||||
ZigValue *const_val = create_const_vals(1);
|
||||
ZigValue *create_const_signed(CodeGen *g, ZigType *type, int64_t x) {
|
||||
ZigValue *const_val = g->pass1_arena->create<ZigValue>();
|
||||
init_const_signed(const_val, type, x);
|
||||
return const_val;
|
||||
}
|
||||
@ -5860,8 +5859,8 @@ void init_const_null(ZigValue *const_val, ZigType *type) {
|
||||
const_val->data.x_optional = nullptr;
|
||||
}
|
||||
|
||||
ZigValue *create_const_null(ZigType *type) {
|
||||
ZigValue *const_val = create_const_vals(1);
|
||||
ZigValue *create_const_null(CodeGen *g, ZigType *type) {
|
||||
ZigValue *const_val = g->pass1_arena->create<ZigValue>();
|
||||
init_const_null(const_val, type);
|
||||
return const_val;
|
||||
}
|
||||
@ -5893,8 +5892,8 @@ void init_const_float(ZigValue *const_val, ZigType *type, double value) {
|
||||
}
|
||||
}
|
||||
|
||||
ZigValue *create_const_float(ZigType *type, double value) {
|
||||
ZigValue *const_val = create_const_vals(1);
|
||||
ZigValue *create_const_float(CodeGen *g, ZigType *type, double value) {
|
||||
ZigValue *const_val = g->pass1_arena->create<ZigValue>();
|
||||
init_const_float(const_val, type, value);
|
||||
return const_val;
|
||||
}
|
||||
@ -5905,8 +5904,8 @@ void init_const_enum(ZigValue *const_val, ZigType *type, const BigInt *tag) {
|
||||
bigint_init_bigint(&const_val->data.x_enum_tag, tag);
|
||||
}
|
||||
|
||||
ZigValue *create_const_enum(ZigType *type, const BigInt *tag) {
|
||||
ZigValue *const_val = create_const_vals(1);
|
||||
ZigValue *create_const_enum(CodeGen *g, ZigType *type, const BigInt *tag) {
|
||||
ZigValue *const_val = g->pass1_arena->create<ZigValue>();
|
||||
init_const_enum(const_val, type, tag);
|
||||
return const_val;
|
||||
}
|
||||
@ -5919,7 +5918,7 @@ void init_const_bool(CodeGen *g, ZigValue *const_val, bool value) {
|
||||
}
|
||||
|
||||
ZigValue *create_const_bool(CodeGen *g, bool value) {
|
||||
ZigValue *const_val = create_const_vals(1);
|
||||
ZigValue *const_val = g->pass1_arena->create<ZigValue>();
|
||||
init_const_bool(g, const_val, value);
|
||||
return const_val;
|
||||
}
|
||||
@ -5929,8 +5928,8 @@ void init_const_runtime(ZigValue *const_val, ZigType *type) {
|
||||
const_val->type = type;
|
||||
}
|
||||
|
||||
ZigValue *create_const_runtime(ZigType *type) {
|
||||
ZigValue *const_val = create_const_vals(1);
|
||||
ZigValue *create_const_runtime(CodeGen *g, ZigType *type) {
|
||||
ZigValue *const_val = g->pass1_arena->create<ZigValue>();
|
||||
init_const_runtime(const_val, type);
|
||||
return const_val;
|
||||
}
|
||||
@ -5942,7 +5941,7 @@ void init_const_type(CodeGen *g, ZigValue *const_val, ZigType *type_value) {
|
||||
}
|
||||
|
||||
ZigValue *create_const_type(CodeGen *g, ZigType *type_value) {
|
||||
ZigValue *const_val = create_const_vals(1);
|
||||
ZigValue *const_val = g->pass1_arena->create<ZigValue>();
|
||||
init_const_type(g, const_val, type_value);
|
||||
return const_val;
|
||||
}
|
||||
@ -5957,7 +5956,7 @@ void init_const_slice(CodeGen *g, ZigValue *const_val, ZigValue *array_val,
|
||||
|
||||
const_val->special = ConstValSpecialStatic;
|
||||
const_val->type = get_slice_type(g, ptr_type);
|
||||
const_val->data.x_struct.fields = alloc_const_vals_ptrs(2);
|
||||
const_val->data.x_struct.fields = alloc_const_vals_ptrs(g, 2);
|
||||
|
||||
init_const_ptr_array(g, const_val->data.x_struct.fields[slice_ptr_index], array_val, start, is_const,
|
||||
PtrLenUnknown);
|
||||
@ -5965,7 +5964,7 @@ void init_const_slice(CodeGen *g, ZigValue *const_val, ZigValue *array_val,
|
||||
}
|
||||
|
||||
ZigValue *create_const_slice(CodeGen *g, ZigValue *array_val, size_t start, size_t len, bool is_const) {
|
||||
ZigValue *const_val = create_const_vals(1);
|
||||
ZigValue *const_val = g->pass1_arena->create<ZigValue>();
|
||||
init_const_slice(g, const_val, array_val, start, len, is_const);
|
||||
return const_val;
|
||||
}
|
||||
@ -5987,7 +5986,7 @@ void init_const_ptr_array(CodeGen *g, ZigValue *const_val, ZigValue *array_val,
|
||||
ZigValue *create_const_ptr_array(CodeGen *g, ZigValue *array_val, size_t elem_index, bool is_const,
|
||||
PtrLen ptr_len)
|
||||
{
|
||||
ZigValue *const_val = create_const_vals(1);
|
||||
ZigValue *const_val = g->pass1_arena->create<ZigValue>();
|
||||
init_const_ptr_array(g, const_val, array_val, elem_index, is_const, ptr_len);
|
||||
return const_val;
|
||||
}
|
||||
@ -6000,7 +5999,7 @@ void init_const_ptr_ref(CodeGen *g, ZigValue *const_val, ZigValue *pointee_val,
|
||||
}
|
||||
|
||||
ZigValue *create_const_ptr_ref(CodeGen *g, ZigValue *pointee_val, bool is_const) {
|
||||
ZigValue *const_val = create_const_vals(1);
|
||||
ZigValue *const_val = g->pass1_arena->create<ZigValue>();
|
||||
init_const_ptr_ref(g, const_val, pointee_val, is_const);
|
||||
return const_val;
|
||||
}
|
||||
@ -6017,25 +6016,21 @@ void init_const_ptr_hard_coded_addr(CodeGen *g, ZigValue *const_val, ZigType *po
|
||||
ZigValue *create_const_ptr_hard_coded_addr(CodeGen *g, ZigType *pointee_type,
|
||||
size_t addr, bool is_const)
|
||||
{
|
||||
ZigValue *const_val = create_const_vals(1);
|
||||
ZigValue *const_val = g->pass1_arena->create<ZigValue>();
|
||||
init_const_ptr_hard_coded_addr(g, const_val, pointee_type, addr, is_const);
|
||||
return const_val;
|
||||
}
|
||||
|
||||
ZigValue *create_const_vals(size_t count) {
|
||||
return allocate<ZigValue>(count, "ZigValue");
|
||||
ZigValue **alloc_const_vals_ptrs(CodeGen *g, size_t count) {
|
||||
return realloc_const_vals_ptrs(g, nullptr, 0, count);
|
||||
}
|
||||
|
||||
ZigValue **alloc_const_vals_ptrs(size_t count) {
|
||||
return realloc_const_vals_ptrs(nullptr, 0, count);
|
||||
}
|
||||
|
||||
ZigValue **realloc_const_vals_ptrs(ZigValue **ptr, size_t old_count, size_t new_count) {
|
||||
ZigValue **realloc_const_vals_ptrs(CodeGen *g, ZigValue **ptr, size_t old_count, size_t new_count) {
|
||||
assert(new_count >= old_count);
|
||||
|
||||
size_t new_item_count = new_count - old_count;
|
||||
ZigValue **result = reallocate(ptr, old_count, new_count, "ZigValue*");
|
||||
ZigValue *vals = create_const_vals(new_item_count);
|
||||
ZigValue **result = heap::c_allocator.reallocate(ptr, old_count, new_count);
|
||||
ZigValue *vals = g->pass1_arena->allocate<ZigValue>(new_item_count);
|
||||
for (size_t i = old_count; i < new_count; i += 1) {
|
||||
result[i] = &vals[i - old_count];
|
||||
}
|
||||
@ -6050,8 +6045,8 @@ TypeStructField **realloc_type_struct_fields(TypeStructField **ptr, size_t old_c
|
||||
assert(new_count >= old_count);
|
||||
|
||||
size_t new_item_count = new_count - old_count;
|
||||
TypeStructField **result = reallocate(ptr, old_count, new_count, "TypeStructField*");
|
||||
TypeStructField *vals = allocate<TypeStructField>(new_item_count, "TypeStructField");
|
||||
TypeStructField **result = heap::c_allocator.reallocate(ptr, old_count, new_count);
|
||||
TypeStructField *vals = heap::c_allocator.allocate<TypeStructField>(new_item_count);
|
||||
for (size_t i = old_count; i < new_count; i += 1) {
|
||||
result[i] = &vals[i - old_count];
|
||||
}
|
||||
@ -6062,7 +6057,7 @@ static ZigType *get_async_fn_type(CodeGen *g, ZigType *orig_fn_type) {
|
||||
if (orig_fn_type->data.fn.fn_type_id.cc == CallingConventionAsync)
|
||||
return orig_fn_type;
|
||||
|
||||
ZigType *fn_type = allocate_nonzero<ZigType>(1);
|
||||
ZigType *fn_type = heap::c_allocator.allocate_nonzero<ZigType>(1);
|
||||
*fn_type = *orig_fn_type;
|
||||
fn_type->data.fn.fn_type_id.cc = CallingConventionAsync;
|
||||
fn_type->llvm_type = nullptr;
|
||||
@ -6236,11 +6231,11 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) {
|
||||
ZigType *fn_type = get_async_fn_type(g, fn->type_entry);
|
||||
|
||||
if (fn->analyzed_executable.need_err_code_spill) {
|
||||
IrInstGenAlloca *alloca_gen = allocate<IrInstGenAlloca>(1);
|
||||
IrInstGenAlloca *alloca_gen = heap::c_allocator.create<IrInstGenAlloca>();
|
||||
alloca_gen->base.id = IrInstGenIdAlloca;
|
||||
alloca_gen->base.base.source_node = fn->proto_node;
|
||||
alloca_gen->base.base.scope = fn->child_scope;
|
||||
alloca_gen->base.value = allocate<ZigValue>(1, "ZigValue");
|
||||
alloca_gen->base.value = g->pass1_arena->create<ZigValue>();
|
||||
alloca_gen->base.value->type = get_pointer_to_type(g, g->builtin_types.entry_global_error_set, false);
|
||||
alloca_gen->base.base.ref_count = 1;
|
||||
alloca_gen->name_hint = "";
|
||||
@ -7375,7 +7370,7 @@ static void init_const_undefined(CodeGen *g, ZigValue *const_val) {
|
||||
|
||||
const_val->special = ConstValSpecialStatic;
|
||||
size_t field_count = wanted_type->data.structure.src_field_count;
|
||||
const_val->data.x_struct.fields = alloc_const_vals_ptrs(field_count);
|
||||
const_val->data.x_struct.fields = alloc_const_vals_ptrs(g, field_count);
|
||||
for (size_t i = 0; i < field_count; i += 1) {
|
||||
ZigValue *field_val = const_val->data.x_struct.fields[i];
|
||||
field_val->type = resolve_struct_field_type(g, wanted_type->data.structure.fields[i]);
|
||||
@ -7418,7 +7413,7 @@ void expand_undef_array(CodeGen *g, ZigValue *const_val) {
|
||||
return;
|
||||
case ConstArraySpecialUndef: {
|
||||
const_val->data.x_array.special = ConstArraySpecialNone;
|
||||
const_val->data.x_array.data.s_none.elements = create_const_vals(elem_count);
|
||||
const_val->data.x_array.data.s_none.elements = g->pass1_arena->allocate<ZigValue>(elem_count);
|
||||
for (size_t i = 0; i < elem_count; i += 1) {
|
||||
ZigValue *element_val = &const_val->data.x_array.data.s_none.elements[i];
|
||||
element_val->type = elem_type;
|
||||
@ -7437,7 +7432,7 @@ void expand_undef_array(CodeGen *g, ZigValue *const_val) {
|
||||
|
||||
const_val->data.x_array.special = ConstArraySpecialNone;
|
||||
assert(elem_count == buf_len(buf));
|
||||
const_val->data.x_array.data.s_none.elements = create_const_vals(elem_count);
|
||||
const_val->data.x_array.data.s_none.elements = g->pass1_arena->allocate<ZigValue>(elem_count);
|
||||
for (size_t i = 0; i < elem_count; i += 1) {
|
||||
ZigValue *this_char = &const_val->data.x_array.data.s_none.elements[i];
|
||||
this_char->special = ConstValSpecialStatic;
|
||||
@ -7609,7 +7604,7 @@ const char *type_id_name(ZigTypeId id) {
|
||||
}
|
||||
|
||||
LinkLib *create_link_lib(Buf *name) {
|
||||
LinkLib *link_lib = allocate<LinkLib>(1);
|
||||
LinkLib *link_lib = heap::c_allocator.create<LinkLib>();
|
||||
link_lib->name = name;
|
||||
return link_lib;
|
||||
}
|
||||
@ -8137,7 +8132,7 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS
|
||||
|
||||
size_t field_count = struct_type->data.structure.src_field_count;
|
||||
// Every field could potentially have a generated padding field after it.
|
||||
LLVMTypeRef *element_types = allocate<LLVMTypeRef>(field_count * 2);
|
||||
LLVMTypeRef *element_types = heap::c_allocator.allocate<LLVMTypeRef>(field_count * 2);
|
||||
|
||||
bool packed = (struct_type->data.structure.layout == ContainerLayoutPacked);
|
||||
size_t packed_bits_offset = 0;
|
||||
@ -8272,7 +8267,7 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS
|
||||
(unsigned)struct_type->data.structure.gen_field_count, packed);
|
||||
}
|
||||
|
||||
ZigLLVMDIType **di_element_types = allocate<ZigLLVMDIType*>(debug_field_count);
|
||||
ZigLLVMDIType **di_element_types = heap::c_allocator.allocate<ZigLLVMDIType*>(debug_field_count);
|
||||
size_t debug_field_index = 0;
|
||||
for (size_t i = 0; i < field_count; i += 1) {
|
||||
TypeStructField *field = struct_type->data.structure.fields[i];
|
||||
@ -8389,7 +8384,7 @@ static void resolve_llvm_types_enum(CodeGen *g, ZigType *enum_type, ResolveStatu
|
||||
uint32_t field_count = enum_type->data.enumeration.src_field_count;
|
||||
|
||||
assert(field_count == 0 || enum_type->data.enumeration.fields != nullptr);
|
||||
ZigLLVMDIEnumerator **di_enumerators = allocate<ZigLLVMDIEnumerator*>(field_count);
|
||||
ZigLLVMDIEnumerator **di_enumerators = heap::c_allocator.allocate<ZigLLVMDIEnumerator*>(field_count);
|
||||
|
||||
for (uint32_t i = 0; i < field_count; i += 1) {
|
||||
TypeEnumField *enum_field = &enum_type->data.enumeration.fields[i];
|
||||
@ -8456,7 +8451,7 @@ static void resolve_llvm_types_union(CodeGen *g, ZigType *union_type, ResolveSta
|
||||
if (ResolveStatusLLVMFwdDecl >= wanted_resolve_status) return;
|
||||
}
|
||||
|
||||
ZigLLVMDIType **union_inner_di_types = allocate<ZigLLVMDIType*>(gen_field_count);
|
||||
ZigLLVMDIType **union_inner_di_types = heap::c_allocator.allocate<ZigLLVMDIType*>(gen_field_count);
|
||||
uint32_t field_count = union_type->data.unionation.src_field_count;
|
||||
for (uint32_t i = 0; i < field_count; i += 1) {
|
||||
TypeUnionField *union_field = &union_type->data.unionation.fields[i];
|
||||
@ -8895,7 +8890,7 @@ static void resolve_llvm_types_fn_type(CodeGen *g, ZigType *fn_type) {
|
||||
param_di_types.append(get_llvm_di_type(g, gen_type));
|
||||
}
|
||||
if (is_async) {
|
||||
fn_type->data.fn.gen_param_info = allocate<FnGenParamInfo>(2);
|
||||
fn_type->data.fn.gen_param_info = heap::c_allocator.allocate<FnGenParamInfo>(2);
|
||||
|
||||
ZigType *frame_type = get_any_frame_type(g, fn_type_id->return_type);
|
||||
gen_param_types.append(get_llvm_type(g, frame_type));
|
||||
@ -8912,7 +8907,7 @@ static void resolve_llvm_types_fn_type(CodeGen *g, ZigType *fn_type) {
|
||||
fn_type->data.fn.gen_param_info[1].gen_index = 1;
|
||||
fn_type->data.fn.gen_param_info[1].type = g->builtin_types.entry_usize;
|
||||
} else {
|
||||
fn_type->data.fn.gen_param_info = allocate<FnGenParamInfo>(fn_type_id->param_count);
|
||||
fn_type->data.fn.gen_param_info = heap::c_allocator.allocate<FnGenParamInfo>(fn_type_id->param_count);
|
||||
for (size_t i = 0; i < fn_type_id->param_count; i += 1) {
|
||||
FnTypeParamInfo *src_param_info = &fn_type->data.fn.fn_type_id.param_info[i];
|
||||
ZigType *type_entry = src_param_info->type;
|
||||
@ -9369,7 +9364,7 @@ bool type_has_optional_repr(ZigType *ty) {
|
||||
}
|
||||
}
|
||||
|
||||
void copy_const_val(ZigValue *dest, ZigValue *src) {
|
||||
void copy_const_val(CodeGen *g, ZigValue *dest, ZigValue *src) {
|
||||
uint32_t prev_align = dest->llvm_align;
|
||||
ConstParent prev_parent = dest->parent;
|
||||
memcpy(dest, src, sizeof(ZigValue));
|
||||
@ -9378,26 +9373,26 @@ void copy_const_val(ZigValue *dest, ZigValue *src) {
|
||||
return;
|
||||
dest->parent = prev_parent;
|
||||
if (dest->type->id == ZigTypeIdStruct) {
|
||||
dest->data.x_struct.fields = alloc_const_vals_ptrs(dest->type->data.structure.src_field_count);
|
||||
dest->data.x_struct.fields = alloc_const_vals_ptrs(g, dest->type->data.structure.src_field_count);
|
||||
for (size_t i = 0; i < dest->type->data.structure.src_field_count; i += 1) {
|
||||
copy_const_val(dest->data.x_struct.fields[i], src->data.x_struct.fields[i]);
|
||||
copy_const_val(g, dest->data.x_struct.fields[i], src->data.x_struct.fields[i]);
|
||||
dest->data.x_struct.fields[i]->parent.id = ConstParentIdStruct;
|
||||
dest->data.x_struct.fields[i]->parent.data.p_struct.struct_val = dest;
|
||||
dest->data.x_struct.fields[i]->parent.data.p_struct.field_index = i;
|
||||
}
|
||||
} else if (dest->type->id == ZigTypeIdArray) {
|
||||
if (dest->data.x_array.special == ConstArraySpecialNone) {
|
||||
dest->data.x_array.data.s_none.elements = create_const_vals(dest->type->data.array.len);
|
||||
dest->data.x_array.data.s_none.elements = g->pass1_arena->allocate<ZigValue>(dest->type->data.array.len);
|
||||
for (uint64_t i = 0; i < dest->type->data.array.len; i += 1) {
|
||||
copy_const_val(&dest->data.x_array.data.s_none.elements[i], &src->data.x_array.data.s_none.elements[i]);
|
||||
copy_const_val(g, &dest->data.x_array.data.s_none.elements[i], &src->data.x_array.data.s_none.elements[i]);
|
||||
dest->data.x_array.data.s_none.elements[i].parent.id = ConstParentIdArray;
|
||||
dest->data.x_array.data.s_none.elements[i].parent.data.p_array.array_val = dest;
|
||||
dest->data.x_array.data.s_none.elements[i].parent.data.p_array.elem_index = i;
|
||||
}
|
||||
}
|
||||
} else if (type_has_optional_repr(dest->type) && dest->data.x_optional != nullptr) {
|
||||
dest->data.x_optional = create_const_vals(1);
|
||||
copy_const_val(dest->data.x_optional, src->data.x_optional);
|
||||
dest->data.x_optional = g->pass1_arena->create<ZigValue>();
|
||||
copy_const_val(g, dest->data.x_optional, src->data.x_optional);
|
||||
dest->data.x_optional->parent.id = ConstParentIdOptionalPayload;
|
||||
dest->data.x_optional->parent.data.p_optional_payload.optional_val = dest;
|
||||
}
|
||||
|
@@ -128,22 +128,22 @@ void init_const_str_lit(CodeGen *g, ZigValue *const_val, Buf *str);
ZigValue *create_const_str_lit(CodeGen *g, Buf *str);

void init_const_bigint(ZigValue *const_val, ZigType *type, const BigInt *bigint);
ZigValue *create_const_bigint(ZigType *type, const BigInt *bigint);
ZigValue *create_const_bigint(CodeGen *g, ZigType *type, const BigInt *bigint);

void init_const_unsigned_negative(ZigValue *const_val, ZigType *type, uint64_t x, bool negative);
ZigValue *create_const_unsigned_negative(ZigType *type, uint64_t x, bool negative);
ZigValue *create_const_unsigned_negative(CodeGen *g, ZigType *type, uint64_t x, bool negative);

void init_const_signed(ZigValue *const_val, ZigType *type, int64_t x);
ZigValue *create_const_signed(ZigType *type, int64_t x);
ZigValue *create_const_signed(CodeGen *g, ZigType *type, int64_t x);

void init_const_usize(CodeGen *g, ZigValue *const_val, uint64_t x);
ZigValue *create_const_usize(CodeGen *g, uint64_t x);

void init_const_float(ZigValue *const_val, ZigType *type, double value);
ZigValue *create_const_float(ZigType *type, double value);
ZigValue *create_const_float(CodeGen *g, ZigType *type, double value);

void init_const_enum(ZigValue *const_val, ZigType *type, const BigInt *tag);
ZigValue *create_const_enum(ZigType *type, const BigInt *tag);
ZigValue *create_const_enum(CodeGen *g, ZigType *type, const BigInt *tag);

void init_const_bool(CodeGen *g, ZigValue *const_val, bool value);
ZigValue *create_const_bool(CodeGen *g, bool value);
@@ -152,7 +152,7 @@ void init_const_type(CodeGen *g, ZigValue *const_val, ZigType *type_value);
ZigValue *create_const_type(CodeGen *g, ZigType *type_value);

void init_const_runtime(ZigValue *const_val, ZigType *type);
ZigValue *create_const_runtime(ZigType *type);
ZigValue *create_const_runtime(CodeGen *g, ZigType *type);

void init_const_ptr_ref(CodeGen *g, ZigValue *const_val, ZigValue *pointee_val, bool is_const);
ZigValue *create_const_ptr_ref(CodeGen *g, ZigValue *pointee_val, bool is_const);
@@ -172,11 +172,10 @@ void init_const_slice(CodeGen *g, ZigValue *const_val, ZigValue *array_val,
ZigValue *create_const_slice(CodeGen *g, ZigValue *array_val, size_t start, size_t len, bool is_const);

void init_const_null(ZigValue *const_val, ZigType *type);
ZigValue *create_const_null(ZigType *type);
ZigValue *create_const_null(CodeGen *g, ZigType *type);

ZigValue *create_const_vals(size_t count);
ZigValue **alloc_const_vals_ptrs(size_t count);
ZigValue **realloc_const_vals_ptrs(ZigValue **ptr, size_t old_count, size_t new_count);
ZigValue **alloc_const_vals_ptrs(CodeGen *g, size_t count);
ZigValue **realloc_const_vals_ptrs(CodeGen *g, ZigValue **ptr, size_t old_count, size_t new_count);

TypeStructField **alloc_type_struct_fields(size_t count);
TypeStructField **realloc_type_struct_fields(TypeStructField **ptr, size_t old_count, size_t new_count);
@@ -275,7 +274,7 @@ Error analyze_import(CodeGen *codegen, ZigType *source_import, Buf *import_targe
ZigType **out_import, Buf **out_import_target_path, Buf *out_full_path);
ZigValue *get_the_one_possible_value(CodeGen *g, ZigType *type_entry);
bool is_anon_container(ZigType *ty);
void copy_const_val(ZigValue *dest, ZigValue *src);
void copy_const_val(CodeGen *g, ZigValue *dest, ZigValue *src);
bool type_has_optional_repr(ZigType *ty);
bool is_opt_err_set(ZigType *ty);
bool type_is_numeric(ZigType *ty);
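The signature changes above all follow one pattern: helpers that used to allocate a ZigValue on their own now take the CodeGen so they can draw from g->pass1_arena. A hedged before/after of a call site (the wrapper function here is hypothetical; the calls use the declarations shown above):

// Hypothetical call site, assuming analyze.hpp from this commit is included.
#include "analyze.hpp"

ZigValue *make_answer(CodeGen *g) {
    // before: create_const_unsigned_negative(g->builtin_types.entry_usize, 42, false);
    //         (the value was handed out by the general-purpose allocator)
    // after:  the value lives in g->pass1_arena and, per the commit message, is
    //         released wholesale early in zig_llvm_emit_output()
    return create_const_unsigned_negative(g, g->builtin_types.entry_usize, 42, false);
}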
@ -93,7 +93,7 @@ static void to_twos_complement(BigInt *dest, const BigInt *op, size_t bit_count)
|
||||
if (dest->data.digit == 0) dest->digit_count = 0;
|
||||
return;
|
||||
}
|
||||
dest->data.digits = allocate_nonzero<uint64_t>(dest->digit_count);
|
||||
dest->data.digits = heap::c_allocator.allocate_nonzero<uint64_t>(dest->digit_count);
|
||||
for (size_t i = 0; i < digits_to_copy; i += 1) {
|
||||
uint64_t digit = (i < op->digit_count) ? op_digits[i] : 0;
|
||||
dest->data.digits[i] = digit;
|
||||
@ -174,7 +174,7 @@ void bigint_init_data(BigInt *dest, const uint64_t *digits, size_t digit_count,
|
||||
|
||||
dest->digit_count = digit_count;
|
||||
dest->is_negative = is_negative;
|
||||
dest->data.digits = allocate_nonzero<uint64_t>(digit_count);
|
||||
dest->data.digits = heap::c_allocator.allocate_nonzero<uint64_t>(digit_count);
|
||||
memcpy(dest->data.digits, digits, sizeof(uint64_t) * digit_count);
|
||||
|
||||
bigint_normalize(dest);
|
||||
@ -191,13 +191,13 @@ void bigint_init_bigint(BigInt *dest, const BigInt *src) {
|
||||
}
|
||||
dest->is_negative = src->is_negative;
|
||||
dest->digit_count = src->digit_count;
|
||||
dest->data.digits = allocate_nonzero<uint64_t>(dest->digit_count);
|
||||
dest->data.digits = heap::c_allocator.allocate_nonzero<uint64_t>(dest->digit_count);
|
||||
memcpy(dest->data.digits, src->data.digits, sizeof(uint64_t) * dest->digit_count);
|
||||
}
|
||||
|
||||
void bigint_deinit(BigInt *bi) {
|
||||
if (bi->digit_count > 1)
|
||||
deallocate<uint64_t>(bi->data.digits, bi->digit_count);
|
||||
heap::c_allocator.deallocate(bi->data.digits, bi->digit_count);
|
||||
}
|
||||
|
||||
void bigint_init_bigfloat(BigInt *dest, const BigFloat *op) {
|
||||
@ -227,7 +227,7 @@ void bigint_init_bigfloat(BigInt *dest, const BigFloat *op) {
|
||||
f128M_rem(&abs_val, &max_u64, &remainder);
|
||||
|
||||
dest->digit_count = 2;
|
||||
dest->data.digits = allocate_nonzero<uint64_t>(dest->digit_count);
|
||||
dest->data.digits = heap::c_allocator.allocate_nonzero<uint64_t>(dest->digit_count);
|
||||
dest->data.digits[0] = f128M_to_ui64(&remainder, softfloat_round_minMag, false);
|
||||
dest->data.digits[1] = f128M_to_ui64(&amt, softfloat_round_minMag, false);
|
||||
bigint_normalize(dest);
|
||||
@ -345,7 +345,7 @@ void bigint_read_twos_complement(BigInt *dest, const uint8_t *buf, size_t bit_co
|
||||
if (dest->digit_count == 1) {
|
||||
digits = &dest->data.digit;
|
||||
} else {
|
||||
digits = allocate_nonzero<uint64_t>(dest->digit_count);
|
||||
digits = heap::c_allocator.allocate_nonzero<uint64_t>(dest->digit_count);
|
||||
dest->data.digits = digits;
|
||||
}
|
||||
|
||||
@ -464,7 +464,7 @@ void bigint_add(BigInt *dest, const BigInt *op1, const BigInt *op2) {
|
||||
}
|
||||
size_t i = 1;
|
||||
uint64_t first_digit = dest->data.digit;
|
||||
dest->data.digits = allocate_nonzero<uint64_t>(max(op1->digit_count, op2->digit_count) + 1);
|
||||
dest->data.digits = heap::c_allocator.allocate_nonzero<uint64_t>(max(op1->digit_count, op2->digit_count) + 1);
|
||||
dest->data.digits[0] = first_digit;
|
||||
|
||||
for (;;) {
|
||||
@ -532,7 +532,7 @@ void bigint_add(BigInt *dest, const BigInt *op1, const BigInt *op2) {
|
||||
return;
|
||||
}
|
||||
uint64_t first_digit = dest->data.digit;
|
||||
dest->data.digits = allocate_nonzero<uint64_t>(bigger_op->digit_count);
|
||||
dest->data.digits = heap::c_allocator.allocate_nonzero<uint64_t>(bigger_op->digit_count);
|
||||
dest->data.digits[0] = first_digit;
|
||||
size_t i = 1;
|
||||
|
||||
@ -1032,7 +1032,7 @@ static void bigint_unsigned_division(const BigInt *op1, const BigInt *op2, BigIn
|
||||
if (lhsWords == 1) {
|
||||
Quotient->data.digit = Make_64(Q[1], Q[0]);
|
||||
} else {
|
||||
Quotient->data.digits = allocate<uint64_t>(lhsWords);
|
||||
Quotient->data.digits = heap::c_allocator.allocate<uint64_t>(lhsWords);
|
||||
for (size_t i = 0; i < lhsWords; i += 1) {
|
||||
Quotient->data.digits[i] = Make_64(Q[i*2+1], Q[i*2]);
|
||||
}
|
||||
@ -1046,7 +1046,7 @@ static void bigint_unsigned_division(const BigInt *op1, const BigInt *op2, BigIn
|
||||
if (rhsWords == 1) {
|
||||
Remainder->data.digit = Make_64(R[1], R[0]);
|
||||
} else {
|
||||
Remainder->data.digits = allocate<uint64_t>(rhsWords);
|
||||
Remainder->data.digits = heap::c_allocator.allocate<uint64_t>(rhsWords);
|
||||
for (size_t i = 0; i < rhsWords; i += 1) {
|
||||
Remainder->data.digits[i] = Make_64(R[i*2+1], R[i*2]);
|
||||
}
|
||||
@ -1218,7 +1218,7 @@ void bigint_or(BigInt *dest, const BigInt *op1, const BigInt *op2) {
|
||||
return;
|
||||
}
|
||||
dest->digit_count = max(op1->digit_count, op2->digit_count);
|
||||
dest->data.digits = allocate_nonzero<uint64_t>(dest->digit_count);
|
||||
dest->data.digits = heap::c_allocator.allocate_nonzero<uint64_t>(dest->digit_count);
|
||||
for (size_t i = 0; i < dest->digit_count; i += 1) {
|
||||
uint64_t digit = 0;
|
||||
if (i < op1->digit_count) {
|
||||
@ -1262,7 +1262,7 @@ void bigint_and(BigInt *dest, const BigInt *op1, const BigInt *op2) {
|
||||
}
|
||||
|
||||
dest->digit_count = max(op1->digit_count, op2->digit_count);
|
||||
dest->data.digits = allocate_nonzero<uint64_t>(dest->digit_count);
|
||||
dest->data.digits = heap::c_allocator.allocate_nonzero<uint64_t>(dest->digit_count);
|
||||
|
||||
size_t i = 0;
|
||||
for (; i < op1->digit_count && i < op2->digit_count; i += 1) {
|
||||
@ -1308,7 +1308,7 @@ void bigint_xor(BigInt *dest, const BigInt *op1, const BigInt *op2) {
|
||||
return;
|
||||
}
|
||||
dest->digit_count = max(op1->digit_count, op2->digit_count);
|
||||
dest->data.digits = allocate_nonzero<uint64_t>(dest->digit_count);
|
||||
dest->data.digits = heap::c_allocator.allocate_nonzero<uint64_t>(dest->digit_count);
|
||||
size_t i = 0;
|
||||
for (; i < op1->digit_count && i < op2->digit_count; i += 1) {
|
||||
dest->data.digits[i] = op1_digits[i] ^ op2_digits[i];
|
||||
@ -1358,7 +1358,7 @@ void bigint_shl(BigInt *dest, const BigInt *op1, const BigInt *op2) {
|
||||
uint64_t digit_shift_count = shift_amt / 64;
|
||||
uint64_t leftover_shift_count = shift_amt % 64;
|
||||
|
||||
dest->data.digits = allocate<uint64_t>(op1->digit_count + digit_shift_count + 1);
|
||||
dest->data.digits = heap::c_allocator.allocate<uint64_t>(op1->digit_count + digit_shift_count + 1);
|
||||
dest->digit_count = digit_shift_count;
|
||||
uint64_t carry = 0;
|
||||
for (size_t i = 0; i < op1->digit_count; i += 1) {
|
||||
@ -1421,7 +1421,7 @@ void bigint_shr(BigInt *dest, const BigInt *op1, const BigInt *op2) {
|
||||
if (dest->digit_count == 1) {
|
||||
digits = &dest->data.digit;
|
||||
} else {
|
||||
digits = allocate<uint64_t>(dest->digit_count);
|
||||
digits = heap::c_allocator.allocate<uint64_t>(dest->digit_count);
|
||||
dest->data.digits = digits;
|
||||
}
|
||||
|
||||
@ -1492,7 +1492,7 @@ void bigint_not(BigInt *dest, const BigInt *op, size_t bit_count, bool is_signed
|
||||
}
|
||||
dest->digit_count = (bit_count + 63) / 64;
|
||||
assert(dest->digit_count >= op->digit_count);
|
||||
dest->data.digits = allocate_nonzero<uint64_t>(dest->digit_count);
|
||||
dest->data.digits = heap::c_allocator.allocate_nonzero<uint64_t>(dest->digit_count);
|
||||
size_t i = 0;
|
||||
for (; i < op->digit_count; i += 1) {
|
||||
dest->data.digits[i] = ~op_digits[i];
|
||||
|
@ -50,7 +50,7 @@ static inline void buf_resize(Buf *buf, size_t new_len) {
|
||||
}
|
||||
|
||||
static inline Buf *buf_alloc_fixed(size_t size) {
|
||||
Buf *buf = allocate<Buf>(1);
|
||||
Buf *buf = heap::c_allocator.create<Buf>();
|
||||
buf_resize(buf, size);
|
||||
return buf;
|
||||
}
|
||||
@ -65,7 +65,7 @@ static inline void buf_deinit(Buf *buf) {
|
||||
|
||||
static inline void buf_destroy(Buf *buf) {
|
||||
buf_deinit(buf);
|
||||
free(buf);
|
||||
heap::c_allocator.destroy(buf);
|
||||
}
|
||||
|
||||
static inline void buf_init_from_mem(Buf *buf, const char *ptr, size_t len) {
|
||||
@ -85,7 +85,7 @@ static inline void buf_init_from_buf(Buf *buf, Buf *other) {
|
||||
|
||||
static inline Buf *buf_create_from_mem(const char *ptr, size_t len) {
|
||||
assert(len != SIZE_MAX);
|
||||
Buf *buf = allocate<Buf>(1);
|
||||
Buf *buf = heap::c_allocator.create<Buf>();
|
||||
buf_init_from_mem(buf, ptr, len);
|
||||
return buf;
|
||||
}
|
||||
@ -108,7 +108,7 @@ static inline Buf *buf_slice(Buf *in_buf, size_t start, size_t end) {
|
||||
assert(end != SIZE_MAX);
|
||||
assert(start < buf_len(in_buf));
|
||||
assert(end <= buf_len(in_buf));
|
||||
Buf *out_buf = allocate<Buf>(1);
|
||||
Buf *out_buf = heap::c_allocator.create<Buf>();
|
||||
out_buf->list.resize(end - start + 1);
|
||||
memcpy(buf_ptr(out_buf), buf_ptr(in_buf) + start, end - start);
|
||||
out_buf->list.at(buf_len(out_buf)) = 0;
|
||||
@ -211,5 +211,4 @@ static inline void buf_replace(Buf* buf, char from, char to) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
@ -21,6 +21,7 @@
|
||||
#include "userland.h"
|
||||
#include "dump_analysis.hpp"
|
||||
#include "softfloat.hpp"
|
||||
#include "mem_profile.hpp"
|
||||
|
||||
#include <stdio.h>
|
||||
#include <errno.h>
|
||||
@ -57,7 +58,7 @@ static void init_darwin_native(CodeGen *g) {
|
||||
}
|
||||
|
||||
static ZigPackage *new_package(const char *root_src_dir, const char *root_src_path, const char *pkg_path) {
|
||||
ZigPackage *entry = allocate<ZigPackage>(1);
|
||||
ZigPackage *entry = heap::c_allocator.create<ZigPackage>();
|
||||
entry->package_table.init(4);
|
||||
buf_init_from_str(&entry->root_src_dir, root_src_dir);
|
||||
buf_init_from_str(&entry->root_src_path, root_src_path);
|
||||
@ -4327,7 +4328,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutableGen *executable, IrIn
|
||||
}
|
||||
size_t field_count = arg_calc.field_index;
|
||||
|
||||
LLVMTypeRef *field_types = allocate_nonzero<LLVMTypeRef>(field_count);
|
||||
LLVMTypeRef *field_types = heap::c_allocator.allocate_nonzero<LLVMTypeRef>(field_count);
|
||||
LLVMGetStructElementTypes(LLVMGetElementType(LLVMTypeOf(frame_result_loc)), field_types);
|
||||
assert(LLVMCountStructElementTypes(LLVMGetElementType(LLVMTypeOf(frame_result_loc))) == arg_calc_start.field_index);
|
||||
|
||||
@ -4680,8 +4681,8 @@ static LLVMValueRef ir_render_asm_gen(CodeGen *g, IrExecutableGen *executable, I
|
||||
instruction->return_count;
|
||||
size_t total_index = 0;
|
||||
size_t param_index = 0;
|
||||
LLVMTypeRef *param_types = allocate<LLVMTypeRef>(input_and_output_count);
|
||||
LLVMValueRef *param_values = allocate<LLVMValueRef>(input_and_output_count);
|
||||
LLVMTypeRef *param_types = heap::c_allocator.allocate<LLVMTypeRef>(input_and_output_count);
|
||||
LLVMValueRef *param_values = heap::c_allocator.allocate<LLVMValueRef>(input_and_output_count);
|
||||
for (size_t i = 0; i < asm_expr->output_list.length; i += 1, total_index += 1) {
|
||||
AsmOutput *asm_output = asm_expr->output_list.at(i);
|
||||
bool is_return = (asm_output->return_type != nullptr);
|
||||
@ -4923,7 +4924,7 @@ static LLVMValueRef ir_render_shuffle_vector(CodeGen *g, IrExecutableGen *execut
|
||||
// second vector. These start at -1 and go down, and are easiest to use
|
||||
// with the ~ operator. Here we convert between the two formats.
|
||||
IrInstGen *mask = instruction->mask;
|
||||
LLVMValueRef *values = allocate<LLVMValueRef>(len_mask);
|
||||
LLVMValueRef *values = heap::c_allocator.allocate<LLVMValueRef>(len_mask);
|
||||
for (uint64_t i = 0; i < len_mask; i++) {
|
||||
if (mask->value->data.x_array.data.s_none.elements[i].special == ConstValSpecialUndef) {
|
||||
values[i] = LLVMGetUndef(LLVMInt32Type());
|
||||
@ -4935,7 +4936,7 @@ static LLVMValueRef ir_render_shuffle_vector(CodeGen *g, IrExecutableGen *execut
|
||||
}
|
||||
|
||||
LLVMValueRef llvm_mask_value = LLVMConstVector(values, len_mask);
|
||||
free(values);
|
||||
heap::c_allocator.deallocate(values, len_mask);
|
||||
|
||||
return LLVMBuildShuffleVector(g->builder,
|
||||
ir_llvm_value(g, instruction->a),
|
||||
@ -5003,8 +5004,8 @@ static LLVMValueRef ir_render_phi(CodeGen *g, IrExecutableGen *executable, IrIns
|
||||
}
|
||||
|
||||
LLVMValueRef phi = LLVMBuildPhi(g->builder, phi_type, "");
|
||||
LLVMValueRef *incoming_values = allocate<LLVMValueRef>(instruction->incoming_count);
|
||||
LLVMBasicBlockRef *incoming_blocks = allocate<LLVMBasicBlockRef>(instruction->incoming_count);
|
||||
LLVMValueRef *incoming_values = heap::c_allocator.allocate<LLVMValueRef>(instruction->incoming_count);
|
||||
LLVMBasicBlockRef *incoming_blocks = heap::c_allocator.allocate<LLVMBasicBlockRef>(instruction->incoming_count);
|
||||
for (size_t i = 0; i < instruction->incoming_count; i += 1) {
|
||||
incoming_values[i] = ir_llvm_value(g, instruction->incoming_values[i]);
|
||||
incoming_blocks[i] = instruction->incoming_blocks[i]->llvm_exit_block;
|
||||
@ -5977,12 +5978,12 @@ static LLVMValueRef ir_render_bswap(CodeGen *g, IrExecutableGen *executable, IrI
|
||||
LLVMValueRef shift_amt = LLVMConstInt(get_llvm_type(g, extended_type), 8, false);
|
||||
if (is_vector) {
|
||||
extended_type = get_vector_type(g, expr_type->data.vector.len, extended_type);
|
||||
LLVMValueRef *values = allocate_nonzero<LLVMValueRef>(expr_type->data.vector.len);
|
||||
LLVMValueRef *values = heap::c_allocator.allocate_nonzero<LLVMValueRef>(expr_type->data.vector.len);
|
||||
for (uint32_t i = 0; i < expr_type->data.vector.len; i += 1) {
|
||||
values[i] = shift_amt;
|
||||
}
|
||||
shift_amt = LLVMConstVector(values, expr_type->data.vector.len);
|
||||
free(values);
|
||||
heap::c_allocator.deallocate(values, expr_type->data.vector.len);
|
||||
}
|
||||
// aabbcc
|
||||
LLVMValueRef extended = LLVMBuildZExt(g->builder, op, get_llvm_type(g, extended_type), "");
|
||||
@ -7015,7 +7016,7 @@ check: switch (const_val->special) {
|
||||
}
|
||||
case ZigTypeIdStruct:
|
||||
{
|
||||
LLVMValueRef *fields = allocate<LLVMValueRef>(type_entry->data.structure.gen_field_count);
|
||||
LLVMValueRef *fields = heap::c_allocator.allocate<LLVMValueRef>(type_entry->data.structure.gen_field_count);
|
||||
size_t src_field_count = type_entry->data.structure.src_field_count;
|
||||
bool make_unnamed_struct = false;
|
||||
assert(type_entry->data.structure.resolve_status == ResolveStatusLLVMFull);
|
||||
@ -7074,7 +7075,7 @@ check: switch (const_val->special) {
|
||||
} else {
|
||||
const LLVMValueRef AMT = LLVMConstInt(LLVMTypeOf(val), 8, false);
|
||||
|
||||
LLVMValueRef *values = allocate<LLVMValueRef>(size_in_bytes);
|
||||
LLVMValueRef *values = heap::c_allocator.allocate<LLVMValueRef>(size_in_bytes);
|
||||
for (size_t i = 0; i < size_in_bytes; i++) {
|
||||
const size_t idx = is_big_endian ? size_in_bytes - 1 - i : i;
|
||||
values[idx] = LLVMConstTruncOrBitCast(val, LLVMInt8Type());
|
||||
@ -7138,7 +7139,7 @@ check: switch (const_val->special) {
|
||||
case ConstArraySpecialNone: {
|
||||
uint64_t extra_len_from_sentinel = (type_entry->data.array.sentinel != nullptr) ? 1 : 0;
|
||||
uint64_t full_len = len + extra_len_from_sentinel;
|
||||
LLVMValueRef *values = allocate<LLVMValueRef>(full_len);
|
||||
LLVMValueRef *values = heap::c_allocator.allocate<LLVMValueRef>(full_len);
|
||||
LLVMTypeRef element_type_ref = get_llvm_type(g, type_entry->data.array.child_type);
|
||||
bool make_unnamed_struct = false;
|
||||
for (uint64_t i = 0; i < len; i += 1) {
|
||||
@ -7170,7 +7171,7 @@ check: switch (const_val->special) {
|
||||
case ConstArraySpecialUndef:
|
||||
return LLVMGetUndef(get_llvm_type(g, type_entry));
|
||||
case ConstArraySpecialNone: {
|
||||
LLVMValueRef *values = allocate<LLVMValueRef>(len);
|
||||
LLVMValueRef *values = heap::c_allocator.allocate<LLVMValueRef>(len);
|
||||
for (uint64_t i = 0; i < len; i += 1) {
|
||||
ZigValue *elem_value = &const_val->data.x_array.data.s_none.elements[i];
|
||||
values[i] = gen_const_val(g, elem_value, "");
|
||||
@ -7180,7 +7181,7 @@ check: switch (const_val->special) {
|
||||
case ConstArraySpecialBuf: {
|
||||
Buf *buf = const_val->data.x_array.data.s_buf;
|
||||
assert(buf_len(buf) == len);
|
||||
LLVMValueRef *values = allocate<LLVMValueRef>(len);
|
||||
LLVMValueRef *values = heap::c_allocator.allocate<LLVMValueRef>(len);
|
||||
for (uint64_t i = 0; i < len; i += 1) {
|
||||
values[i] = LLVMConstInt(g->builtin_types.entry_u8->llvm_type, buf_ptr(buf)[i], false);
|
||||
}
|
||||
@ -7382,7 +7383,7 @@ static void generate_error_name_table(CodeGen *g) {
|
||||
PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0, false);
|
||||
ZigType *str_type = get_slice_type(g, u8_ptr_type);
|
||||
|
||||
LLVMValueRef *values = allocate<LLVMValueRef>(g->errors_by_index.length);
|
||||
LLVMValueRef *values = heap::c_allocator.allocate<LLVMValueRef>(g->errors_by_index.length);
|
||||
values[0] = LLVMGetUndef(get_llvm_type(g, str_type));
|
||||
for (size_t i = 1; i < g->errors_by_index.length; i += 1) {
|
||||
ErrorTableEntry *err_entry = g->errors_by_index.at(i);
|
||||
@ -7911,6 +7912,9 @@ static void do_code_gen(CodeGen *g) {
}

static void zig_llvm_emit_output(CodeGen *g) {
g->pass1_arena->destruct(&heap::c_allocator);
g->pass1_arena = nullptr;

bool is_small = g->build_mode == BuildModeSmallRelease;

Buf *output_path = &g->o_file_output_path;
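A minimal sketch of the pass1_arena lifecycle that this hunk completes, using only calls that appear in this commit: the arena is built in codegen_create(), pass-1 ZigValue allocations are carved out of it, and the single destruct() above releases them all just before LLVM emission.

    heap::ArenaAllocator *pass1_arena =
            heap::ArenaAllocator::construct(&heap::c_allocator, &heap::c_allocator, "pass1");
    ZigValue *val = pass1_arena->create<ZigValue>(); // individual deallocations are no-ops for an arena
    pass1_arena->destruct(&heap::c_allocator);       // releases every pass-1 ZigValue at once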
@ -8207,7 +8211,7 @@ static void define_intern_values(CodeGen *g) {
|
||||
}
|
||||
|
||||
static BuiltinFnEntry *create_builtin_fn(CodeGen *g, BuiltinFnId id, const char *name, size_t count) {
|
||||
BuiltinFnEntry *builtin_fn = allocate<BuiltinFnEntry>(1);
|
||||
BuiltinFnEntry *builtin_fn = heap::c_allocator.create<BuiltinFnEntry>();
|
||||
buf_init_from_str(&builtin_fn->name, name);
|
||||
builtin_fn->id = id;
|
||||
builtin_fn->param_count = count;
|
||||
@ -8925,16 +8929,16 @@ static void init(CodeGen *g) {
define_builtin_types(g);
define_intern_values(g);

IrInstGen *sentinel_instructions = allocate<IrInstGen>(2);
IrInstGen *sentinel_instructions = heap::c_allocator.allocate<IrInstGen>(2);
g->invalid_inst_gen = &sentinel_instructions[0];
g->invalid_inst_gen->value = allocate<ZigValue>(1, "ZigValue");
g->invalid_inst_gen->value = g->pass1_arena->create<ZigValue>();
g->invalid_inst_gen->value->type = g->builtin_types.entry_invalid;

g->unreach_instruction = &sentinel_instructions[1];
g->unreach_instruction->value = allocate<ZigValue>(1, "ZigValue");
g->unreach_instruction->value = g->pass1_arena->create<ZigValue>();
g->unreach_instruction->value->type = g->builtin_types.entry_unreachable;

g->invalid_inst_src = allocate<IrInstSrc>(1);
g->invalid_inst_src = heap::c_allocator.create<IrInstSrc>();

define_builtin_fns(g);
Error err;
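A minimal sketch of the replacement pattern used throughout this commit: the old free-standing allocate<T>(n)/free calls become method calls on an explicit allocator, and deallocate takes the element count again so the profiling build can account for it (len below is just a placeholder count).

    ZigType *one = heap::c_allocator.create<ZigType>();                  // was allocate<ZigType>(1)
    LLVMValueRef *many = heap::c_allocator.allocate<LLVMValueRef>(len);  // zero-initialized storage
    heap::c_allocator.deallocate(many, len);                             // was free(many)
    heap::c_allocator.destroy(one);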
@ -9016,7 +9020,7 @@ static void detect_libc(CodeGen *g) {
|
||||
buf_ptr(g->zig_lib_dir), target_os_name(g->zig_target->os));
|
||||
|
||||
g->libc_include_dir_len = 4;
|
||||
g->libc_include_dir_list = allocate<Buf*>(g->libc_include_dir_len);
|
||||
g->libc_include_dir_list = heap::c_allocator.allocate<Buf*>(g->libc_include_dir_len);
|
||||
g->libc_include_dir_list[0] = arch_include_dir;
|
||||
g->libc_include_dir_list[1] = generic_include_dir;
|
||||
g->libc_include_dir_list[2] = arch_os_include_dir;
|
||||
@ -9025,7 +9029,7 @@ static void detect_libc(CodeGen *g) {
|
||||
}
|
||||
|
||||
if (g->zig_target->is_native) {
|
||||
g->libc = allocate<ZigLibCInstallation>(1);
|
||||
g->libc = heap::c_allocator.create<ZigLibCInstallation>();
|
||||
|
||||
// search for native_libc.txt in following dirs:
|
||||
// - LOCAL_CACHE_DIR
|
||||
@ -9105,7 +9109,7 @@ static void detect_libc(CodeGen *g) {
|
||||
size_t want_um_and_shared_dirs = (g->zig_target->os == OsWindows) ? 2 : 0;
|
||||
size_t dir_count = 1 + want_sys_dir + want_um_and_shared_dirs;
|
||||
g->libc_include_dir_len = 0;
|
||||
g->libc_include_dir_list = allocate<Buf*>(dir_count);
|
||||
g->libc_include_dir_list = heap::c_allocator.allocate<Buf*>(dir_count);
|
||||
|
||||
g->libc_include_dir_list[g->libc_include_dir_len] = &g->libc->include_dir;
|
||||
g->libc_include_dir_len += 1;
|
||||
@ -9472,10 +9476,10 @@ static void update_test_functions_builtin_decl(CodeGen *g) {
|
||||
if ((err = type_resolve(g, struct_type, ResolveStatusSizeKnown)))
|
||||
zig_unreachable();
|
||||
|
||||
ZigValue *test_fn_array = create_const_vals(1);
|
||||
ZigValue *test_fn_array = g->pass1_arena->create<ZigValue>();
|
||||
test_fn_array->type = get_array_type(g, struct_type, g->test_fns.length, nullptr);
|
||||
test_fn_array->special = ConstValSpecialStatic;
|
||||
test_fn_array->data.x_array.data.s_none.elements = create_const_vals(g->test_fns.length);
|
||||
test_fn_array->data.x_array.data.s_none.elements = g->pass1_arena->allocate<ZigValue>(g->test_fns.length);
|
||||
|
||||
for (size_t i = 0; i < g->test_fns.length; i += 1) {
|
||||
ZigFn *test_fn_entry = g->test_fns.at(i);
|
||||
@ -9486,7 +9490,7 @@ static void update_test_functions_builtin_decl(CodeGen *g) {
|
||||
this_val->parent.id = ConstParentIdArray;
|
||||
this_val->parent.data.p_array.array_val = test_fn_array;
|
||||
this_val->parent.data.p_array.elem_index = i;
|
||||
this_val->data.x_struct.fields = alloc_const_vals_ptrs(3);
|
||||
this_val->data.x_struct.fields = alloc_const_vals_ptrs(g, 3);
|
||||
|
||||
ZigValue *name_field = this_val->data.x_struct.fields[0];
|
||||
ZigValue *name_array_val = create_const_str_lit(g, &test_fn_entry->symbol_name)->data.x_ptr.data.ref.pointee;
|
||||
@ -9505,7 +9509,7 @@ static void update_test_functions_builtin_decl(CodeGen *g) {
|
||||
frame_size_field->data.x_optional = nullptr;
|
||||
|
||||
if (fn_is_async(test_fn_entry)) {
|
||||
frame_size_field->data.x_optional = create_const_vals(1);
|
||||
frame_size_field->data.x_optional = g->pass1_arena->create<ZigValue>();
|
||||
frame_size_field->data.x_optional->special = ConstValSpecialStatic;
|
||||
frame_size_field->data.x_optional->type = g->builtin_types.entry_usize;
|
||||
bigint_init_unsigned(&frame_size_field->data.x_optional->data.x_bigint,
|
||||
@ -9640,7 +9644,7 @@ static Error get_tmp_filename(CodeGen *g, Buf *out, Buf *suffix) {
|
||||
|
||||
Error create_c_object_cache(CodeGen *g, CacheHash **out_cache_hash, bool verbose) {
|
||||
Error err;
|
||||
CacheHash *cache_hash = allocate<CacheHash>(1);
|
||||
CacheHash *cache_hash = heap::c_allocator.create<CacheHash>();
|
||||
Buf *manifest_dir = buf_sprintf("%s" OS_SEP CACHE_HASH_SUBDIR, buf_ptr(g->cache_dir));
|
||||
cache_init(cache_hash, manifest_dir);
|
||||
|
||||
@ -10794,7 +10798,8 @@ CodeGen *codegen_create(Buf *main_pkg_path, Buf *root_src_path, const ZigTarget
OutType out_type, BuildMode build_mode, Buf *override_lib_dir,
ZigLibCInstallation *libc, Buf *cache_dir, bool is_test_build, Stage2ProgressNode *progress_node)
{
CodeGen *g = allocate<CodeGen>(1);
CodeGen *g = heap::c_allocator.create<CodeGen>();
g->pass1_arena = heap::ArenaAllocator::construct(&heap::c_allocator, &heap::c_allocator, "pass1");
g->main_progress_node = progress_node;

codegen_add_time_event(g, "Initialize");
@ -10937,35 +10942,35 @@ void codegen_switch_sub_prog_node(CodeGen *g, Stage2ProgressNode *node) {
|
||||
|
||||
ZigValue *CodeGen::Intern::for_undefined() {
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
memprof_intern_count.x_undefined += 1;
|
||||
mem::intern_counters.x_undefined += 1;
|
||||
#endif
|
||||
return &this->x_undefined;
|
||||
}
|
||||
|
||||
ZigValue *CodeGen::Intern::for_void() {
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
memprof_intern_count.x_void += 1;
|
||||
mem::intern_counters.x_void += 1;
|
||||
#endif
|
||||
return &this->x_void;
|
||||
}
|
||||
|
||||
ZigValue *CodeGen::Intern::for_null() {
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
memprof_intern_count.x_null += 1;
|
||||
mem::intern_counters.x_null += 1;
|
||||
#endif
|
||||
return &this->x_null;
|
||||
}
|
||||
|
||||
ZigValue *CodeGen::Intern::for_unreachable() {
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
memprof_intern_count.x_unreachable += 1;
|
||||
mem::intern_counters.x_unreachable += 1;
|
||||
#endif
|
||||
return &this->x_unreachable;
|
||||
}
|
||||
|
||||
ZigValue *CodeGen::Intern::for_zero_byte() {
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
memprof_intern_count.zero_byte += 1;
|
||||
mem::intern_counters.zero_byte += 1;
|
||||
#endif
|
||||
return &this->zero_byte;
|
||||
}
|
||||
|
@ -99,7 +99,7 @@ void err_msg_add_note(ErrorMsg *parent, ErrorMsg *note) {
|
||||
ErrorMsg *err_msg_create_with_offset(Buf *path, size_t line, size_t column, size_t offset,
|
||||
const char *source, Buf *msg)
|
||||
{
|
||||
ErrorMsg *err_msg = allocate<ErrorMsg>(1);
|
||||
ErrorMsg *err_msg = heap::c_allocator.create<ErrorMsg>();
|
||||
err_msg->path = path;
|
||||
err_msg->line_start = line;
|
||||
err_msg->column_start = column;
|
||||
@ -138,7 +138,7 @@ ErrorMsg *err_msg_create_with_offset(Buf *path, size_t line, size_t column, size
|
||||
ErrorMsg *err_msg_create_with_line(Buf *path, size_t line, size_t column,
|
||||
Buf *source, ZigList<size_t> *line_offsets, Buf *msg)
|
||||
{
|
||||
ErrorMsg *err_msg = allocate<ErrorMsg>(1);
|
||||
ErrorMsg *err_msg = heap::c_allocator.create<ErrorMsg>();
|
||||
err_msg->path = path;
|
||||
err_msg->line_start = line;
|
||||
err_msg->column_start = column;
|
||||
|
@ -21,7 +21,7 @@ static const ZigGLibCLib glibc_libs[] = {
|
||||
Error glibc_load_metadata(ZigGLibCAbi **out_result, Buf *zig_lib_dir, bool verbose) {
|
||||
Error err;
|
||||
|
||||
ZigGLibCAbi *glibc_abi = allocate<ZigGLibCAbi>(1);
|
||||
ZigGLibCAbi *glibc_abi = heap::c_allocator.create<ZigGLibCAbi>();
|
||||
glibc_abi->vers_txt_path = buf_sprintf("%s" OS_SEP "libc" OS_SEP "glibc" OS_SEP "vers.txt", buf_ptr(zig_lib_dir));
|
||||
glibc_abi->fns_txt_path = buf_sprintf("%s" OS_SEP "libc" OS_SEP "glibc" OS_SEP "fns.txt", buf_ptr(zig_lib_dir));
|
||||
glibc_abi->abi_txt_path = buf_sprintf("%s" OS_SEP "libc" OS_SEP "glibc" OS_SEP "abi.txt", buf_ptr(zig_lib_dir));
|
||||
@ -100,10 +100,10 @@ Error glibc_load_metadata(ZigGLibCAbi **out_result, Buf *zig_lib_dir, bool verbo
|
||||
Optional<Slice<uint8_t>> opt_line = SplitIterator_next_separate(&it);
|
||||
if (!opt_line.is_some) break;
|
||||
|
||||
ver_list_base = allocate<ZigGLibCVerList>(glibc_abi->all_functions.length);
|
||||
ver_list_base = heap::c_allocator.allocate<ZigGLibCVerList>(glibc_abi->all_functions.length);
|
||||
SplitIterator line_it = memSplit(opt_line.value, str(" "));
|
||||
for (;;) {
|
||||
ZigTarget *target = allocate<ZigTarget>(1);
|
||||
ZigTarget *target = heap::c_allocator.create<ZigTarget>();
|
||||
Optional<Slice<uint8_t>> opt_target = SplitIterator_next(&line_it);
|
||||
if (!opt_target.is_some) break;
|
||||
|
||||
@ -174,7 +174,7 @@ Error glibc_build_dummies_and_maps(CodeGen *g, const ZigGLibCAbi *glibc_abi, con
|
||||
Error err;
|
||||
|
||||
Buf *cache_dir = get_global_cache_dir();
|
||||
CacheHash *cache_hash = allocate<CacheHash>(1);
|
||||
CacheHash *cache_hash = heap::c_allocator.create<CacheHash>();
|
||||
Buf *manifest_dir = buf_sprintf("%s" OS_SEP CACHE_HASH_SUBDIR, buf_ptr(cache_dir));
|
||||
cache_init(cache_hash, manifest_dir);
|
||||
|
||||
|
@ -19,7 +19,7 @@ public:
|
||||
init_capacity(capacity);
|
||||
}
|
||||
void deinit(void) {
|
||||
free(_entries);
|
||||
heap::c_allocator.deallocate(_entries, _capacity);
|
||||
}
|
||||
|
||||
struct Entry {
|
||||
@ -57,7 +57,7 @@ public:
|
||||
if (old_entry->used)
|
||||
internal_put(old_entry->key, old_entry->value);
|
||||
}
|
||||
free(old_entries);
|
||||
heap::c_allocator.deallocate(old_entries, old_capacity);
|
||||
}
|
||||
}
|
||||
|
||||
@ -164,7 +164,7 @@ private:
|
||||
|
||||
void init_capacity(int capacity) {
|
||||
_capacity = capacity;
|
||||
_entries = allocate<Entry>(_capacity);
|
||||
_entries = heap::c_allocator.allocate<Entry>(_capacity);
|
||||
_size = 0;
|
||||
_max_distance_from_start_index = 0;
|
||||
for (int i = 0; i < _capacity; i += 1) {
|
||||
|
377  src/heap.cpp  Normal file
@ -0,0 +1,377 @@
|
||||
/*
|
||||
* Copyright (c) 2020 Andrew Kelley
|
||||
*
|
||||
* This file is part of zig, which is MIT licensed.
|
||||
* See http://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
#include <new>
|
||||
#include <string.h>
|
||||
|
||||
#include "config.h"
|
||||
#include "heap.hpp"
|
||||
#include "mem_profile.hpp"
|
||||
|
||||
namespace heap {
|
||||
|
||||
extern mem::Allocator &bootstrap_allocator;
|
||||
|
||||
//
|
||||
// BootstrapAllocator implementation is identical to CAllocator minus
// the profiling functionality. Splitting off to a base interface doesn't
// seem worthwhile.
//
|
||||
void BootstrapAllocator::init(const char *name) {}
|
||||
void BootstrapAllocator::deinit() {}
|
||||
|
||||
void *BootstrapAllocator::internal_allocate(const mem::TypeInfo &info, size_t count) {
|
||||
return mem::os::calloc(count, info.size);
|
||||
}
|
||||
|
||||
void *BootstrapAllocator::internal_allocate_nonzero(const mem::TypeInfo &info, size_t count) {
|
||||
return mem::os::malloc(count * info.size);
|
||||
}
|
||||
|
||||
void *BootstrapAllocator::internal_reallocate(const mem::TypeInfo &info, void *old_ptr, size_t old_count, size_t new_count) {
|
||||
auto new_ptr = this->internal_reallocate_nonzero(info, old_ptr, old_count, new_count);
|
||||
if (new_count > old_count)
|
||||
memset(reinterpret_cast<uint8_t *>(new_ptr) + (old_count * info.size), 0, (new_count - old_count) * info.size);
|
||||
return new_ptr;
|
||||
}
|
||||
|
||||
void *BootstrapAllocator::internal_reallocate_nonzero(const mem::TypeInfo &info, void *old_ptr, size_t old_count, size_t new_count) {
|
||||
return mem::os::realloc(old_ptr, new_count * info.size);
|
||||
}
|
||||
|
||||
void BootstrapAllocator::internal_deallocate(const mem::TypeInfo &info, void *ptr, size_t count) {
|
||||
mem::os::free(ptr);
|
||||
}
|
||||
|
||||
void CAllocator::init(const char *name) {
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
this->profile = bootstrap_allocator.create<mem::Profile>();
|
||||
this->profile->init(name, "CAllocator");
|
||||
#endif
|
||||
}
|
||||
|
||||
void CAllocator::deinit() {
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
assert(this->profile);
|
||||
this->profile->deinit();
|
||||
bootstrap_allocator.destroy(this->profile);
|
||||
this->profile = nullptr;
|
||||
#endif
|
||||
}
|
||||
|
||||
CAllocator *CAllocator::construct(mem::Allocator *allocator, const char *name) {
|
||||
auto p = new(allocator->create<CAllocator>()) CAllocator();
|
||||
p->init(name);
|
||||
return p;
|
||||
}
|
||||
|
||||
void CAllocator::destruct(mem::Allocator *allocator) {
|
||||
this->deinit();
|
||||
allocator->destroy(this);
|
||||
}
|
||||
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
void CAllocator::print_report(FILE *file) {
|
||||
this->profile->print_report(file);
|
||||
}
|
||||
#endif
|
||||
|
||||
void *CAllocator::internal_allocate(const mem::TypeInfo &info, size_t count) {
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
this->profile->record_alloc(info, count);
|
||||
#endif
|
||||
return mem::os::calloc(count, info.size);
|
||||
}
|
||||
|
||||
void *CAllocator::internal_allocate_nonzero(const mem::TypeInfo &info, size_t count) {
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
this->profile->record_alloc(info, count);
|
||||
#endif
|
||||
return mem::os::malloc(count * info.size);
|
||||
}
|
||||
|
||||
void *CAllocator::internal_reallocate(const mem::TypeInfo &info, void *old_ptr, size_t old_count, size_t new_count) {
|
||||
auto new_ptr = this->internal_reallocate_nonzero(info, old_ptr, old_count, new_count);
|
||||
if (new_count > old_count)
|
||||
memset(reinterpret_cast<uint8_t *>(new_ptr) + (old_count * info.size), 0, (new_count - old_count) * info.size);
|
||||
return new_ptr;
|
||||
}
|
||||
|
||||
void *CAllocator::internal_reallocate_nonzero(const mem::TypeInfo &info, void *old_ptr, size_t old_count, size_t new_count) {
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
this->profile->record_dealloc(info, old_count);
|
||||
this->profile->record_alloc(info, new_count);
|
||||
#endif
|
||||
return mem::os::realloc(old_ptr, new_count * info.size);
|
||||
}
|
||||
|
||||
void CAllocator::internal_deallocate(const mem::TypeInfo &info, void *ptr, size_t count) {
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
this->profile->record_dealloc(info, count);
|
||||
#endif
|
||||
mem::os::free(ptr);
|
||||
}
|
||||
|
||||
struct ArenaAllocator::Impl {
|
||||
Allocator *backing;
|
||||
|
||||
// regular allocations bump through a segment of static size
|
||||
struct Segment {
|
||||
static constexpr size_t size = 65536;
|
||||
static constexpr size_t object_threshold = 4096;
|
||||
|
||||
uint8_t data[size];
|
||||
};
|
||||
|
||||
// active segment
|
||||
Segment *segment;
|
||||
size_t segment_offset;
|
||||
|
||||
// keep track of segments
|
||||
struct SegmentTrack {
|
||||
static constexpr size_t size = (4096 - sizeof(SegmentTrack *)) / sizeof(Segment *);
|
||||
|
||||
// null if first
|
||||
SegmentTrack *prev;
|
||||
Segment *segments[size];
|
||||
};
|
||||
static_assert(sizeof(SegmentTrack) <= 4096, "unwanted struct padding");
|
||||
|
||||
// active segment track
|
||||
SegmentTrack *segment_track;
|
||||
size_t segment_track_remain;
|
||||
|
||||
// individual allocations punted to backing allocator
|
||||
struct Object {
|
||||
uint8_t *ptr;
|
||||
size_t len;
|
||||
};
|
||||
|
||||
// keep track of objects
|
||||
struct ObjectTrack {
|
||||
static constexpr size_t size = (4096 - sizeof(ObjectTrack *)) / sizeof(Object);
|
||||
|
||||
// null if first
|
||||
ObjectTrack *prev;
|
||||
Object objects[size];
|
||||
};
|
||||
static_assert(sizeof(ObjectTrack) <= 4096, "unwanted struct padding");
|
||||
|
||||
// active object track
|
||||
ObjectTrack *object_track;
|
||||
size_t object_track_remain;
|
||||
|
||||
ATTRIBUTE_RETURNS_NOALIAS inline void *allocate(const mem::TypeInfo& info, size_t count);
|
||||
inline void *reallocate(const mem::TypeInfo& info, void *old_ptr, size_t old_count, size_t new_count);
|
||||
|
||||
inline void new_segment();
|
||||
inline void track_segment();
|
||||
inline void track_object(Object object);
|
||||
};
|
||||
|
||||
void *ArenaAllocator::Impl::allocate(const mem::TypeInfo& info, size_t count) {
|
||||
#ifndef NDEBUG
|
||||
// make behavior when size == 0 portable
|
||||
if (info.size == 0 || count == 0)
|
||||
return nullptr;
|
||||
#endif
|
||||
const size_t nbytes = info.size * count;
|
||||
this->segment_offset = (this->segment_offset + (info.alignment - 1)) & ~(info.alignment - 1);
|
||||
if (nbytes >= Segment::object_threshold) {
|
||||
auto ptr = this->backing->allocate<uint8_t>(nbytes);
|
||||
this->track_object({ptr, nbytes});
|
||||
return ptr;
|
||||
}
|
||||
if (this->segment_offset + nbytes > Segment::size)
|
||||
this->new_segment();
|
||||
auto ptr = &this->segment->data[this->segment_offset];
|
||||
this->segment_offset += nbytes;
|
||||
return ptr;
|
||||
}
|
||||
|
||||
void *ArenaAllocator::Impl::reallocate(const mem::TypeInfo& info, void *old_ptr, size_t old_count, size_t new_count) {
|
||||
#ifndef NDEBUG
|
||||
// make behavior when size == 0 portable
|
||||
if (info.size == 0 && old_ptr == nullptr)
|
||||
return nullptr;
|
||||
#endif
|
||||
const size_t new_nbytes = info.size * new_count;
|
||||
if (new_nbytes <= info.size * old_count)
|
||||
return old_ptr;
|
||||
const size_t old_nbytes = info.size * old_count;
|
||||
this->segment_offset = (this->segment_offset + (info.alignment - 1)) & ~(info.alignment - 1);
|
||||
if (new_nbytes >= Segment::object_threshold) {
|
||||
auto new_ptr = this->backing->allocate<uint8_t>(new_nbytes);
|
||||
this->track_object({new_ptr, new_nbytes});
|
||||
memcpy(new_ptr, old_ptr, old_nbytes);
|
||||
return new_ptr;
|
||||
}
|
||||
if (this->segment_offset + new_nbytes > Segment::size)
|
||||
this->new_segment();
|
||||
auto new_ptr = &this->segment->data[this->segment_offset];
|
||||
this->segment_offset += new_nbytes;
|
||||
memcpy(new_ptr, old_ptr, old_nbytes);
|
||||
return new_ptr;
|
||||
}
|
||||
|
||||
void ArenaAllocator::Impl::new_segment() {
|
||||
this->segment = this->backing->create<Segment>();
|
||||
this->segment_offset = 0;
|
||||
this->track_segment();
|
||||
}
|
||||
|
||||
void ArenaAllocator::Impl::track_segment() {
|
||||
assert(this->segment != nullptr);
|
||||
if (this->segment_track_remain < 1) {
|
||||
auto prev = this->segment_track;
|
||||
this->segment_track = this->backing->create<SegmentTrack>();
|
||||
this->segment_track->prev = prev;
|
||||
this->segment_track_remain = SegmentTrack::size;
|
||||
}
|
||||
this->segment_track_remain -= 1;
|
||||
this->segment_track->segments[this->segment_track_remain] = this->segment;
|
||||
}
|
||||
|
||||
void ArenaAllocator::Impl::track_object(Object object) {
|
||||
if (this->object_track_remain < 1) {
|
||||
auto prev = this->object_track;
|
||||
this->object_track = this->backing->create<ObjectTrack>();
|
||||
this->object_track->prev = prev;
|
||||
this->object_track_remain = ObjectTrack::size;
|
||||
}
|
||||
this->object_track_remain -= 1;
|
||||
this->object_track->objects[this->object_track_remain] = object;
|
||||
}
|
||||
|
||||
void ArenaAllocator::init(Allocator *backing, const char *name) {
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
this->profile = bootstrap_allocator.create<mem::Profile>();
|
||||
this->profile->init(name, "ArenaAllocator");
|
||||
#endif
|
||||
this->impl = bootstrap_allocator.create<Impl>();
|
||||
{
|
||||
auto &r = *this->impl;
|
||||
r.backing = backing;
|
||||
r.segment_offset = Impl::Segment::size;
|
||||
}
|
||||
}
|
||||
|
||||
void ArenaAllocator::deinit() {
|
||||
auto &backing = *this->impl->backing;
|
||||
|
||||
// segments
|
||||
if (this->impl->segment_track) {
|
||||
// active track is not full and bounded by track_remain
|
||||
auto prev = this->impl->segment_track->prev;
|
||||
{
|
||||
auto t = this->impl->segment_track;
|
||||
for (size_t i = this->impl->segment_track_remain; i < Impl::SegmentTrack::size; ++i)
|
||||
backing.destroy(t->segments[i]);
|
||||
backing.destroy(t);
|
||||
}
|
||||
|
||||
// previous tracks are full
|
||||
for (auto t = prev; t != nullptr;) {
|
||||
for (size_t i = 0; i < Impl::SegmentTrack::size; ++i)
|
||||
backing.destroy(t->segments[i]);
|
||||
prev = t->prev;
|
||||
backing.destroy(t);
|
||||
t = prev;
|
||||
}
|
||||
}
|
||||
|
||||
// objects
|
||||
if (this->impl->object_track) {
|
||||
// active track is not full and bounded by track_remain
|
||||
auto prev = this->impl->object_track->prev;
|
||||
{
|
||||
auto t = this->impl->object_track;
|
||||
for (size_t i = this->impl->object_track_remain; i < Impl::ObjectTrack::size; ++i) {
|
||||
auto &obj = t->objects[i];
|
||||
backing.deallocate(obj.ptr, obj.len);
|
||||
}
|
||||
backing.destroy(t);
|
||||
}
|
||||
|
||||
// previous tracks are full
|
||||
for (auto t = prev; t != nullptr;) {
|
||||
for (size_t i = 0; i < Impl::ObjectTrack::size; ++i) {
|
||||
auto &obj = t->objects[i];
|
||||
backing.deallocate(obj.ptr, obj.len);
|
||||
}
|
||||
prev = t->prev;
|
||||
backing.destroy(t);
|
||||
t = prev;
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
assert(this->profile);
|
||||
this->profile->deinit();
|
||||
bootstrap_allocator.destroy(this->profile);
|
||||
this->profile = nullptr;
|
||||
#endif
|
||||
}
|
||||
|
||||
ArenaAllocator *ArenaAllocator::construct(mem::Allocator *allocator, mem::Allocator *backing, const char *name) {
|
||||
auto p = new(allocator->create<ArenaAllocator>()) ArenaAllocator;
|
||||
p->init(backing, name);
|
||||
return p;
|
||||
}
|
||||
|
||||
void ArenaAllocator::destruct(mem::Allocator *allocator) {
|
||||
this->deinit();
|
||||
allocator->destroy(this);
|
||||
}
|
||||
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
void ArenaAllocator::print_report(FILE *file) {
|
||||
this->profile->print_report(file);
|
||||
}
|
||||
#endif
|
||||
|
||||
void *ArenaAllocator::internal_allocate(const mem::TypeInfo &info, size_t count) {
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
this->profile->record_alloc(info, count);
|
||||
#endif
|
||||
return this->impl->allocate(info, count);
|
||||
}
|
||||
|
||||
void *ArenaAllocator::internal_allocate_nonzero(const mem::TypeInfo &info, size_t count) {
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
this->profile->record_alloc(info, count);
|
||||
#endif
|
||||
return this->impl->allocate(info, count);
|
||||
}
|
||||
|
||||
void *ArenaAllocator::internal_reallocate(const mem::TypeInfo &info, void *old_ptr, size_t old_count, size_t new_count) {
|
||||
return this->internal_reallocate_nonzero(info, old_ptr, old_count, new_count);
|
||||
}
|
||||
|
||||
void *ArenaAllocator::internal_reallocate_nonzero(const mem::TypeInfo &info, void *old_ptr, size_t old_count, size_t new_count) {
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
this->profile->record_dealloc(info, old_count);
|
||||
this->profile->record_alloc(info, new_count);
|
||||
#endif
|
||||
return this->impl->reallocate(info, old_ptr, old_count, new_count);
|
||||
}
|
||||
|
||||
void ArenaAllocator::internal_deallocate(const mem::TypeInfo &info, void *ptr, size_t count) {
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
this->profile->record_dealloc(info, count);
|
||||
#endif
|
||||
// noop
|
||||
}
|
||||
|
||||
BootstrapAllocator bootstrap_allocator_state;
|
||||
mem::Allocator &bootstrap_allocator = bootstrap_allocator_state;
|
||||
|
||||
CAllocator c_allocator_state;
|
||||
mem::Allocator &c_allocator = c_allocator_state;
|
||||
|
||||
} // namespace heap
|
101  src/heap.hpp  Normal file
@ -0,0 +1,101 @@
|
||||
/*
|
||||
* Copyright (c) 2020 Andrew Kelley
|
||||
*
|
||||
* This file is part of zig, which is MIT licensed.
|
||||
* See http://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
#ifndef ZIG_HEAP_HPP
|
||||
#define ZIG_HEAP_HPP
|
||||
|
||||
#include "config.h"
|
||||
#include "util_base.hpp"
|
||||
#include "mem.hpp"
|
||||
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
namespace mem {
|
||||
struct Profile;
|
||||
}
|
||||
#endif
|
||||
|
||||
namespace heap {
|
||||
|
||||
struct BootstrapAllocator final : mem::Allocator {
|
||||
void init(const char *name);
|
||||
void deinit();
|
||||
void destruct(Allocator *allocator) {}
|
||||
|
||||
private:
|
||||
ATTRIBUTE_RETURNS_NOALIAS void *internal_allocate(const mem::TypeInfo &info, size_t count) final;
|
||||
ATTRIBUTE_RETURNS_NOALIAS void *internal_allocate_nonzero(const mem::TypeInfo &info, size_t count) final;
|
||||
void *internal_reallocate(const mem::TypeInfo &info, void *old_ptr, size_t old_count, size_t new_count) final;
|
||||
void *internal_reallocate_nonzero(const mem::TypeInfo &info, void *old_ptr, size_t old_count, size_t new_count) final;
|
||||
void internal_deallocate(const mem::TypeInfo &info, void *ptr, size_t count) final;
|
||||
};
|
||||
|
||||
struct CAllocator final : mem::Allocator {
|
||||
void init(const char *name);
|
||||
void deinit();
|
||||
|
||||
static CAllocator *construct(mem::Allocator *allocator, const char *name);
|
||||
void destruct(mem::Allocator *allocator) final;
|
||||
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
void print_report(FILE *file = nullptr);
|
||||
#endif
|
||||
|
||||
private:
|
||||
ATTRIBUTE_RETURNS_NOALIAS void *internal_allocate(const mem::TypeInfo &info, size_t count) final;
|
||||
ATTRIBUTE_RETURNS_NOALIAS void *internal_allocate_nonzero(const mem::TypeInfo &info, size_t count) final;
|
||||
void *internal_reallocate(const mem::TypeInfo &info, void *old_ptr, size_t old_count, size_t new_count) final;
|
||||
void *internal_reallocate_nonzero(const mem::TypeInfo &info, void *old_ptr, size_t old_count, size_t new_count) final;
|
||||
void internal_deallocate(const mem::TypeInfo &info, void *ptr, size_t count) final;
|
||||
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
mem::Profile *profile;
|
||||
#endif
|
||||
};
|
||||
|
||||
//
|
||||
// arena allocator
//
// - allocations are backed by the underlying allocator's memory
// - allocations are N:1 relationship to underlying allocations
// - deallocations are noops
// - deinit() releases all underlying memory
//
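//
// usage sketch (illustrative only; Foo is a placeholder type -- this mirrors how
// CodeGen.pass1_arena is driven elsewhere in this commit):
//
//   ArenaAllocator *arena = ArenaAllocator::construct(&c_allocator, &c_allocator, "example");
//   Foo *tmp = arena->create<Foo>();   // bump-allocated; a later deallocate() is a noop
//   arena->destruct(&c_allocator);     // frees every segment and punted object at once
//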
struct ArenaAllocator final : mem::Allocator {
|
||||
void init(Allocator *backing, const char *name);
|
||||
void deinit();
|
||||
|
||||
static ArenaAllocator *construct(mem::Allocator *allocator, mem::Allocator *backing, const char *name);
|
||||
void destruct(mem::Allocator *allocator) final;
|
||||
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
void print_report(FILE *file = nullptr);
|
||||
#endif
|
||||
|
||||
private:
|
||||
ATTRIBUTE_RETURNS_NOALIAS void *internal_allocate(const mem::TypeInfo &info, size_t count) final;
|
||||
ATTRIBUTE_RETURNS_NOALIAS void *internal_allocate_nonzero(const mem::TypeInfo &info, size_t count) final;
|
||||
void *internal_reallocate(const mem::TypeInfo &info, void *old_ptr, size_t old_count, size_t new_count) final;
|
||||
void *internal_reallocate_nonzero(const mem::TypeInfo &info, void *old_ptr, size_t old_count, size_t new_count) final;
|
||||
void internal_deallocate(const mem::TypeInfo &info, void *ptr, size_t count) final;
|
||||
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
mem::Profile *profile;
|
||||
#endif
|
||||
|
||||
struct Impl;
|
||||
Impl *impl;
|
||||
};
|
||||
|
||||
extern BootstrapAllocator bootstrap_allocator_state;
|
||||
extern mem::Allocator &bootstrap_allocator;
|
||||
|
||||
extern CAllocator c_allocator_state;
|
||||
extern mem::Allocator &c_allocator;
|
||||
|
||||
} // namespace heap
|
||||
|
||||
#endif
|
1039  src/ir.cpp
File diff suppressed because it is too large
@ -37,6 +37,4 @@ ZigValue *const_ptr_pointee(IrAnalyze *ira, CodeGen *codegen, ZigValue *const_va
|
||||
void dbg_ir_break(const char *src_file, uint32_t line);
|
||||
void dbg_ir_clear(void);
|
||||
|
||||
void destroy_instruction_gen(IrInstGen *inst);
|
||||
|
||||
#endif
|
||||
|
38  src/link.cpp
@ -650,7 +650,7 @@ static const char *build_libunwind(CodeGen *parent, Stage2ProgressNode *progress
|
||||
};
|
||||
ZigList<CFile *> c_source_files = {0};
|
||||
for (size_t i = 0; i < array_length(unwind_src); i += 1) {
|
||||
CFile *c_file = allocate<CFile>(1);
|
||||
CFile *c_file = heap::c_allocator.create<CFile>();
|
||||
c_file->source_path = path_from_libunwind(parent, unwind_src[i].path);
|
||||
switch (unwind_src[i].kind) {
|
||||
case SrcC:
|
||||
@ -1111,7 +1111,7 @@ static const char *build_musl(CodeGen *parent, Stage2ProgressNode *progress_node
|
||||
Buf *full_path = buf_sprintf("%s" OS_SEP "libc" OS_SEP "%s",
|
||||
buf_ptr(parent->zig_lib_dir), buf_ptr(src_file));
|
||||
|
||||
CFile *c_file = allocate<CFile>(1);
|
||||
CFile *c_file = heap::c_allocator.create<CFile>();
|
||||
c_file->source_path = buf_ptr(full_path);
|
||||
|
||||
musl_add_cc_args(parent, c_file, src_kind == MuslSrcO3);
|
||||
@ -1127,7 +1127,7 @@ static const char *build_musl(CodeGen *parent, Stage2ProgressNode *progress_node
|
||||
}
|
||||
|
||||
static void add_msvcrt_os_dep(CodeGen *parent, CodeGen *child_gen, const char *src_path) {
|
||||
CFile *c_file = allocate<CFile>(1);
|
||||
CFile *c_file = heap::c_allocator.create<CFile>();
|
||||
c_file->source_path = buf_ptr(buf_sprintf("%s" OS_SEP "libc" OS_SEP "mingw" OS_SEP "%s",
|
||||
buf_ptr(parent->zig_lib_dir), src_path));
|
||||
c_file->args.append("-DHAVE_CONFIG_H");
|
||||
@ -1151,7 +1151,7 @@ static void add_msvcrt_os_dep(CodeGen *parent, CodeGen *child_gen, const char *s
|
||||
}
|
||||
|
||||
static void add_mingwex_os_dep(CodeGen *parent, CodeGen *child_gen, const char *src_path) {
|
||||
CFile *c_file = allocate<CFile>(1);
|
||||
CFile *c_file = heap::c_allocator.create<CFile>();
|
||||
c_file->source_path = buf_ptr(buf_sprintf("%s" OS_SEP "libc" OS_SEP "mingw" OS_SEP "%s",
|
||||
buf_ptr(parent->zig_lib_dir), src_path));
|
||||
c_file->args.append("-DHAVE_CONFIG_H");
|
||||
@ -1178,7 +1178,7 @@ static void add_mingwex_os_dep(CodeGen *parent, CodeGen *child_gen, const char *
|
||||
static const char *get_libc_crt_file(CodeGen *parent, const char *file, Stage2ProgressNode *progress_node) {
|
||||
if (parent->libc == nullptr && parent->zig_target->os == OsWindows) {
|
||||
if (strcmp(file, "crt2.o") == 0) {
|
||||
CFile *c_file = allocate<CFile>(1);
|
||||
CFile *c_file = heap::c_allocator.create<CFile>();
|
||||
c_file->source_path = buf_ptr(buf_sprintf(
|
||||
"%s" OS_SEP "libc" OS_SEP "mingw" OS_SEP "crt" OS_SEP "crtexe.c", buf_ptr(parent->zig_lib_dir)));
|
||||
mingw_add_cc_args(parent, c_file);
|
||||
@ -1190,7 +1190,7 @@ static const char *get_libc_crt_file(CodeGen *parent, const char *file, Stage2Pr
|
||||
//c_file->args.append("-DWPRFLAG=1");
|
||||
return build_libc_object(parent, "crt2", c_file, progress_node);
|
||||
} else if (strcmp(file, "dllcrt2.o") == 0) {
|
||||
CFile *c_file = allocate<CFile>(1);
|
||||
CFile *c_file = heap::c_allocator.create<CFile>();
|
||||
c_file->source_path = buf_ptr(buf_sprintf(
|
||||
"%s" OS_SEP "libc" OS_SEP "mingw" OS_SEP "crt" OS_SEP "crtdll.c", buf_ptr(parent->zig_lib_dir)));
|
||||
mingw_add_cc_args(parent, c_file);
|
||||
@ -1231,7 +1231,7 @@ static const char *get_libc_crt_file(CodeGen *parent, const char *file, Stage2Pr
|
||||
"mingw" OS_SEP "crt" OS_SEP "cxa_atexit.c",
|
||||
};
|
||||
for (size_t i = 0; i < array_length(deps); i += 1) {
|
||||
CFile *c_file = allocate<CFile>(1);
|
||||
CFile *c_file = heap::c_allocator.create<CFile>();
|
||||
c_file->source_path = path_from_libc(parent, deps[i]);
|
||||
c_file->args.append("-DHAVE_CONFIG_H");
|
||||
c_file->args.append("-D_SYSCRT=1");
|
||||
@ -1301,7 +1301,7 @@ static const char *get_libc_crt_file(CodeGen *parent, const char *file, Stage2Pr
|
||||
}
|
||||
} else if (parent->libc == nullptr && target_is_glibc(parent->zig_target)) {
|
||||
if (strcmp(file, "crti.o") == 0) {
|
||||
CFile *c_file = allocate<CFile>(1);
|
||||
CFile *c_file = heap::c_allocator.create<CFile>();
|
||||
c_file->source_path = glibc_start_asm_path(parent, "crti.S");
|
||||
glibc_add_include_dirs(parent, c_file);
|
||||
c_file->args.append("-D_LIBC_REENTRANT");
|
||||
@ -1317,7 +1317,7 @@ static const char *get_libc_crt_file(CodeGen *parent, const char *file, Stage2Pr
|
||||
c_file->args.append("-Wa,--noexecstack");
|
||||
return build_libc_object(parent, "crti", c_file, progress_node);
|
||||
} else if (strcmp(file, "crtn.o") == 0) {
|
||||
CFile *c_file = allocate<CFile>(1);
|
||||
CFile *c_file = heap::c_allocator.create<CFile>();
|
||||
c_file->source_path = glibc_start_asm_path(parent, "crtn.S");
|
||||
glibc_add_include_dirs(parent, c_file);
|
||||
c_file->args.append("-D_LIBC_REENTRANT");
|
||||
@ -1328,7 +1328,7 @@ static const char *get_libc_crt_file(CodeGen *parent, const char *file, Stage2Pr
|
||||
c_file->args.append("-Wa,--noexecstack");
|
||||
return build_libc_object(parent, "crtn", c_file, progress_node);
|
||||
} else if (strcmp(file, "start.os") == 0) {
|
||||
CFile *c_file = allocate<CFile>(1);
|
||||
CFile *c_file = heap::c_allocator.create<CFile>();
|
||||
c_file->source_path = glibc_start_asm_path(parent, "start.S");
|
||||
glibc_add_include_dirs(parent, c_file);
|
||||
c_file->args.append("-D_LIBC_REENTRANT");
|
||||
@ -1346,7 +1346,7 @@ static const char *get_libc_crt_file(CodeGen *parent, const char *file, Stage2Pr
|
||||
c_file->args.append("-Wa,--noexecstack");
|
||||
return build_libc_object(parent, "start", c_file, progress_node);
|
||||
} else if (strcmp(file, "abi-note.o") == 0) {
|
||||
CFile *c_file = allocate<CFile>(1);
|
||||
CFile *c_file = heap::c_allocator.create<CFile>();
|
||||
c_file->source_path = path_from_libc(parent, "glibc" OS_SEP "csu" OS_SEP "abi-note.S");
|
||||
c_file->args.append("-I");
|
||||
c_file->args.append(path_from_libc(parent, "glibc" OS_SEP "csu"));
|
||||
@ -1369,7 +1369,7 @@ static const char *get_libc_crt_file(CodeGen *parent, const char *file, Stage2Pr
|
||||
} else if (strcmp(file, "libc_nonshared.a") == 0) {
|
||||
CodeGen *child_gen = create_child_codegen(parent, nullptr, OutTypeLib, nullptr, "c_nonshared", progress_node);
|
||||
{
|
||||
CFile *c_file = allocate<CFile>(1);
|
||||
CFile *c_file = heap::c_allocator.create<CFile>();
|
||||
c_file->source_path = path_from_libc(parent, "glibc" OS_SEP "csu" OS_SEP "elf-init.c");
|
||||
c_file->args.append("-std=gnu11");
|
||||
c_file->args.append("-fgnu89-inline");
|
||||
@ -1419,7 +1419,7 @@ static const char *get_libc_crt_file(CodeGen *parent, const char *file, Stage2Pr
|
||||
{"stack_chk_fail_local", "glibc" OS_SEP "debug" OS_SEP "stack_chk_fail_local.c"},
|
||||
};
|
||||
for (size_t i = 0; i < array_length(deps); i += 1) {
|
||||
CFile *c_file = allocate<CFile>(1);
|
||||
CFile *c_file = heap::c_allocator.create<CFile>();
|
||||
c_file->source_path = path_from_libc(parent, deps[i].path);
|
||||
c_file->args.append("-std=gnu11");
|
||||
c_file->args.append("-fgnu89-inline");
|
||||
@ -1451,26 +1451,26 @@ static const char *get_libc_crt_file(CodeGen *parent, const char *file, Stage2Pr
|
||||
}
|
||||
} else if (parent->libc == nullptr && target_is_musl(parent->zig_target)) {
|
||||
if (strcmp(file, "crti.o") == 0) {
|
||||
CFile *c_file = allocate<CFile>(1);
|
||||
CFile *c_file = heap::c_allocator.create<CFile>();
|
||||
c_file->source_path = musl_start_asm_path(parent, "crti.s");
|
||||
musl_add_cc_args(parent, c_file, false);
|
||||
c_file->args.append("-Qunused-arguments");
|
||||
return build_libc_object(parent, "crti", c_file, progress_node);
|
||||
} else if (strcmp(file, "crtn.o") == 0) {
|
||||
CFile *c_file = allocate<CFile>(1);
|
||||
CFile *c_file = heap::c_allocator.create<CFile>();
|
||||
c_file->source_path = musl_start_asm_path(parent, "crtn.s");
|
||||
c_file->args.append("-Qunused-arguments");
|
||||
musl_add_cc_args(parent, c_file, false);
|
||||
return build_libc_object(parent, "crtn", c_file, progress_node);
|
||||
} else if (strcmp(file, "crt1.o") == 0) {
|
||||
CFile *c_file = allocate<CFile>(1);
|
||||
CFile *c_file = heap::c_allocator.create<CFile>();
|
||||
c_file->source_path = path_from_libc(parent, "musl" OS_SEP "crt" OS_SEP "crt1.c");
|
||||
musl_add_cc_args(parent, c_file, false);
|
||||
c_file->args.append("-fno-stack-protector");
|
||||
c_file->args.append("-DCRT");
|
||||
return build_libc_object(parent, "crt1", c_file, progress_node);
|
||||
} else if (strcmp(file, "Scrt1.o") == 0) {
|
||||
CFile *c_file = allocate<CFile>(1);
|
||||
CFile *c_file = heap::c_allocator.create<CFile>();
|
||||
c_file->source_path = path_from_libc(parent, "musl" OS_SEP "crt" OS_SEP "Scrt1.c");
|
||||
musl_add_cc_args(parent, c_file, false);
|
||||
c_file->args.append("-fPIC");
|
||||
@ -1982,7 +1982,7 @@ static const char *get_def_lib(CodeGen *parent, const char *name, Buf *def_in_fi
|
||||
Buf *def_include_dir = buf_sprintf("%s" OS_SEP "libc" OS_SEP "mingw" OS_SEP "def-include",
|
||||
buf_ptr(parent->zig_lib_dir));
|
||||
|
||||
CacheHash *cache_hash = allocate<CacheHash>(1);
|
||||
CacheHash *cache_hash = heap::c_allocator.create<CacheHash>();
|
||||
cache_init(cache_hash, manifest_dir);
|
||||
|
||||
cache_buf(cache_hash, compiler_id);
|
||||
@ -2367,7 +2367,7 @@ static void construct_linker_job_coff(LinkJob *lj) {
|
||||
|
||||
lj->args.append(get_def_lib(g, name, &lib_path));
|
||||
|
||||
free(name);
|
||||
mem::os::free(name);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -13,7 +13,7 @@
template<typename T>
struct ZigList {
void deinit() {
deallocate(items, capacity);
heap::c_allocator.deallocate(items, capacity);
}
void append(const T& item) {
ensure_capacity(length + 1);
@ -70,7 +70,7 @@ struct ZigList {
better_capacity = better_capacity * 5 / 2 + 8;
} while (better_capacity < new_capacity);

items = reallocate_nonzero(items, capacity, better_capacity);
items = heap::c_allocator.reallocate_nonzero(items, capacity, better_capacity);
capacity = better_capacity;
}

@ -91,5 +91,3 @@ struct ZigList {
};

#endif
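A quick worked example of the growth loop kept by this hunk, assuming the list starts empty (capacity 0): the do/while takes the capacity through 8, 28, 78, 203, ... until it reaches the requested size, and the resize itself is now carried out by heap::c_allocator.reallocate_nonzero() on the whole items buffer instead of the old free-standing reallocate_nonzero().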
48  src/main.cpp
@ -11,12 +11,14 @@
|
||||
#include "compiler.hpp"
|
||||
#include "config.h"
|
||||
#include "error.hpp"
|
||||
#include "heap.hpp"
|
||||
#include "os.hpp"
|
||||
#include "target.hpp"
|
||||
#include "libc_installation.hpp"
|
||||
#include "userland.h"
|
||||
#include "glibc.hpp"
|
||||
#include "dump_analysis.hpp"
|
||||
#include "mem_profile.hpp"
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
@ -243,21 +245,10 @@ int main_exit(Stage2ProgressNode *root_progress_node, int exit_code) {
|
||||
if (root_progress_node != nullptr) {
|
||||
stage2_progress_end(root_progress_node);
|
||||
}
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
if (mem_report) {
|
||||
memprof_dump_stats(stderr);
|
||||
}
|
||||
#endif
|
||||
return exit_code;
|
||||
}
|
||||
|
||||
int main(int argc, char **argv) {
|
||||
stage2_attach_segfault_handler();
|
||||
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
memprof_init();
|
||||
#endif
|
||||
|
||||
static int main0(int argc, char **argv) {
|
||||
char *arg0 = argv[0];
|
||||
Error err;
|
||||
|
||||
@ -278,9 +269,6 @@ int main(int argc, char **argv) {
|
||||
return ZigClang_main(argc, argv);
|
||||
}
|
||||
|
||||
// Must be before all os.hpp function calls.
|
||||
os_init();
|
||||
|
||||
if (argc == 2 && strcmp(argv[1], "id") == 0) {
|
||||
Buf *compiler_id;
|
||||
if ((err = get_compiler_id(&compiler_id))) {
|
||||
@ -439,7 +427,7 @@ int main(int argc, char **argv) {
|
||||
bool enable_doc_generation = false;
|
||||
bool disable_bin_generation = false;
|
||||
const char *cache_dir = nullptr;
|
||||
CliPkg *cur_pkg = allocate<CliPkg>(1);
|
||||
CliPkg *cur_pkg = heap::c_allocator.create<CliPkg>();
|
||||
BuildMode build_mode = BuildModeDebug;
|
||||
ZigList<const char *> test_exec_args = {0};
|
||||
int runtime_args_start = -1;
|
||||
@ -635,6 +623,7 @@ int main(int argc, char **argv) {
|
||||
} else if (strcmp(arg, "-fmem-report") == 0) {
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
mem_report = true;
|
||||
mem::report_print = true;
|
||||
#else
|
||||
fprintf(stderr, "-fmem-report requires configuring with -DZIG_ENABLE_MEM_PROFILE=ON\n");
|
||||
return print_error_usage(arg0);
|
||||
@ -695,7 +684,7 @@ int main(int argc, char **argv) {
|
||||
fprintf(stderr, "Expected 2 arguments after --pkg-begin\n");
|
||||
return print_error_usage(arg0);
|
||||
}
|
||||
CliPkg *new_cur_pkg = allocate<CliPkg>(1);
|
||||
CliPkg *new_cur_pkg = heap::c_allocator.create<CliPkg>();
|
||||
i += 1;
|
||||
new_cur_pkg->name = argv[i];
|
||||
i += 1;
|
||||
@ -810,7 +799,7 @@ int main(int argc, char **argv) {
|
||||
} else if (strcmp(arg, "--object") == 0) {
|
||||
objects.append(argv[i]);
|
||||
} else if (strcmp(arg, "--c-source") == 0) {
|
||||
CFile *c_file = allocate<CFile>(1);
|
||||
CFile *c_file = heap::c_allocator.create<CFile>();
|
||||
for (;;) {
|
||||
if (argv[i][0] == '-') {
|
||||
c_file->args.append(argv[i]);
|
||||
@ -990,7 +979,7 @@ int main(int argc, char **argv) {
|
||||
}
|
||||
}
|
||||
if (target_is_glibc(&target)) {
|
||||
target.glibc_version = allocate<ZigGLibCVersion>(1);
|
||||
target.glibc_version = heap::c_allocator.create<ZigGLibCVersion>();
|
||||
|
||||
if (target_glibc != nullptr) {
|
||||
if ((err = target_parse_glibc_version(target.glibc_version, target_glibc))) {
|
||||
@ -1138,7 +1127,7 @@ int main(int argc, char **argv) {
|
||||
}
|
||||
ZigLibCInstallation *libc = nullptr;
|
||||
if (libc_txt != nullptr) {
|
||||
libc = allocate<ZigLibCInstallation>(1);
|
||||
libc = heap::c_allocator.create<ZigLibCInstallation>();
|
||||
if ((err = zig_libc_parse(libc, buf_create_from_str(libc_txt), &target, true))) {
|
||||
fprintf(stderr, "Unable to parse --libc text file: %s\n", err_str(err));
|
||||
return main_exit(root_progress_node, EXIT_FAILURE);
|
||||
@ -1269,7 +1258,8 @@ int main(int argc, char **argv) {
|
||||
|
||||
if (cmd == CmdRun) {
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
memprof_dump_stats(stderr);
|
||||
if (mem::report_print)
|
||||
mem::print_report();
|
||||
#endif
|
||||
|
||||
const char *exec_path = buf_ptr(&g->output_file_path);
|
||||
@ -1384,4 +1374,20 @@ int main(int argc, char **argv) {
|
||||
case CmdNone:
|
||||
return print_full_usage(arg0, stderr, EXIT_FAILURE);
|
||||
}
|
||||
zig_unreachable();
|
||||
}
|
||||
|
||||
int main(int argc, char **argv) {
stage2_attach_segfault_handler();
os_init();
mem::init();

auto result = main0(argc, argv);

#ifdef ZIG_ENABLE_MEM_PROFILE
if (mem::report_print)
mem::intern_counters.print_report();
#endif
mem::deinit();
return result;
}
|
37  src/mem.cpp  Normal file
@ -0,0 +1,37 @@
/*
* Copyright (c) 2020 Andrew Kelley
*
* This file is part of zig, which is MIT licensed.
* See http://opensource.org/licenses/MIT
*/

#include "config.h"
#include "mem.hpp"
#include "mem_profile.hpp"
#include "heap.hpp"

namespace mem {

void init() {
heap::bootstrap_allocator_state.init("heap::bootstrap_allocator");
heap::c_allocator_state.init("heap::c_allocator");
}

void deinit() {
heap::c_allocator_state.deinit();
heap::bootstrap_allocator_state.deinit();
}

#ifdef ZIG_ENABLE_MEM_PROFILE
void print_report(FILE *file) {
heap::c_allocator_state.print_report(file);
intern_counters.print_report(file);
}
#endif

#ifdef ZIG_ENABLE_MEM_PROFILE
bool report_print = false;
FILE *report_file{nullptr};
#endif

} // namespace mem
149  src/mem.hpp  Normal file
@ -0,0 +1,149 @@
|
||||
/*
|
||||
* Copyright (c) 2020 Andrew Kelley
|
||||
*
|
||||
* This file is part of zig, which is MIT licensed.
|
||||
* See http://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
#ifndef ZIG_MEM_HPP
|
||||
#define ZIG_MEM_HPP
|
||||
|
||||
#include <stdint.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "config.h"
|
||||
#include "util_base.hpp"
|
||||
#include "mem_type_info.hpp"
|
||||
|
||||
//
|
||||
// -- Memory Allocation General Notes --
|
||||
//
|
||||
// `heap::c_allocator` is the preferred general allocator.
|
||||
//
|
||||
// `heap::bootstrap_allocator` is an implementation detail for use
|
||||
// by allocators themselves when incidental heap may be required for
|
||||
// profiling and statistics. It breaks the infinite recursion cycle.
|
||||
//
|
||||
// `mem::os` contains a raw wrapper for system malloc API used in
|
||||
// preference to calling ::{malloc, free, calloc, realloc} directly.
|
||||
// This isolates usage and helps with audits:
|
||||
//
|
||||
// mem::os::malloc
|
||||
// mem::os::free
|
||||
// mem::os::calloc
|
||||
// mem::os::realloc
|
||||
//
|
||||
namespace mem {
|
||||
|
||||
// initialize mem module before any use
|
||||
void init();
|
||||
|
||||
// deinitialize mem module to free memory and print report
|
||||
void deinit();
|
||||
|
||||
// isolate system/libc allocators
|
||||
namespace os {
|
||||
|
||||
ATTRIBUTE_RETURNS_NOALIAS
|
||||
inline void *malloc(size_t size) {
|
||||
#ifndef NDEBUG
|
||||
// make behavior when size == 0 portable
|
||||
if (size == 0)
|
||||
return nullptr;
|
||||
#endif
|
||||
auto ptr = ::malloc(size);
|
||||
if (ptr == nullptr)
|
||||
zig_panic("allocation failed");
|
||||
return ptr;
|
||||
}
|
||||
|
||||
inline void free(void *ptr) {
|
||||
::free(ptr);
|
||||
}
|
||||
|
||||
ATTRIBUTE_RETURNS_NOALIAS
|
||||
inline void *calloc(size_t count, size_t size) {
|
||||
#ifndef NDEBUG
|
||||
// make behavior when size == 0 portable
|
||||
if (count == 0 || size == 0)
|
||||
return nullptr;
|
||||
#endif
|
||||
auto ptr = ::calloc(count, size);
|
||||
if (ptr == nullptr)
|
||||
zig_panic("allocation failed");
|
||||
return ptr;
|
||||
}
|
||||
|
||||
inline void *realloc(void *old_ptr, size_t size) {
|
||||
#ifndef NDEBUG
|
||||
// make behavior when size == 0 portable
|
||||
if (old_ptr == nullptr && size == 0)
|
||||
return nullptr;
|
||||
#endif
|
||||
auto ptr = ::realloc(old_ptr, size);
|
||||
if (ptr == nullptr)
|
||||
zig_panic("allocation failed");
|
||||
return ptr;
|
||||
}
|
||||
|
||||
} // namespace os
|
||||
|
||||
struct Allocator {
|
||||
virtual void destruct(Allocator *allocator) = 0;
|
||||
|
||||
template <typename T> ATTRIBUTE_RETURNS_NOALIAS
|
||||
T *allocate(size_t count) {
|
||||
return reinterpret_cast<T *>(this->internal_allocate(TypeInfo::make<T>(), count));
|
||||
}
|
||||
|
||||
template <typename T> ATTRIBUTE_RETURNS_NOALIAS
|
||||
T *allocate_nonzero(size_t count) {
|
||||
return reinterpret_cast<T *>(this->internal_allocate_nonzero(TypeInfo::make<T>(), count));
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
T *reallocate(T *old_ptr, size_t old_count, size_t new_count) {
|
||||
return reinterpret_cast<T *>(this->internal_reallocate(TypeInfo::make<T>(), old_ptr, old_count, new_count));
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
T *reallocate_nonzero(T *old_ptr, size_t old_count, size_t new_count) {
|
||||
return reinterpret_cast<T *>(this->internal_reallocate_nonzero(TypeInfo::make<T>(), old_ptr, old_count, new_count));
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
void deallocate(T *ptr, size_t count) {
|
||||
this->internal_deallocate(TypeInfo::make<T>(), ptr, count);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
T *create() {
|
||||
return reinterpret_cast<T *>(this->internal_allocate(TypeInfo::make<T>(), 1));
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
void destroy(T *ptr) {
|
||||
this->internal_deallocate(TypeInfo::make<T>(), ptr, 1);
|
||||
}
|
||||
|
||||
protected:
|
||||
ATTRIBUTE_RETURNS_NOALIAS virtual void *internal_allocate(const TypeInfo &info, size_t count) = 0;
|
||||
ATTRIBUTE_RETURNS_NOALIAS virtual void *internal_allocate_nonzero(const TypeInfo &info, size_t count) = 0;
|
||||
virtual void *internal_reallocate(const TypeInfo &info, void *old_ptr, size_t old_count, size_t new_count) = 0;
|
||||
virtual void *internal_reallocate_nonzero(const TypeInfo &info, void *old_ptr, size_t old_count, size_t new_count) = 0;
|
||||
virtual void internal_deallocate(const TypeInfo &info, void *ptr, size_t count) = 0;
|
||||
};
|
||||
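//
// Sketch of a conforming implementation (illustrative only; MyAllocator is a placeholder):
// an allocator overrides the five internal_* hooks plus destruct(); heap::CAllocator and
// heap::ArenaAllocator follow this same shape.
//
//   struct MyAllocator final : Allocator {
//       void destruct(Allocator *allocator) final { allocator->destroy(this); }
//   private:
//       void *internal_allocate(const TypeInfo &info, size_t count) final {
//           return os::calloc(count, info.size);
//       }
//       // ... internal_allocate_nonzero, internal_reallocate,
//       //     internal_reallocate_nonzero, internal_deallocate ...
//   };
//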
|
||||
#ifdef ZIG_ENABLE_MEM_PROFILE
|
||||
void print_report(FILE *file = nullptr);
|
||||
|
||||
// global memory report flag
|
||||
extern bool report_print;
|
||||
// global memory report default destination
|
||||
extern FILE *report_file;
|
||||
#endif
|
||||
|
||||
} // namespace mem
|
||||
|
||||
#endif
|
244  src/mem_hash_map.hpp  Normal file
@ -0,0 +1,244 @@
|
||||
/*
|
||||
* Copyright (c) 2015 Andrew Kelley
|
||||
*
|
||||
* This file is part of zig, which is MIT licensed.
|
||||
* See http://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
#ifndef ZIG_MEM_HASH_MAP_HPP
|
||||
#define ZIG_MEM_HASH_MAP_HPP
|
||||
|
||||
#include "mem.hpp"
|
||||
|
||||
namespace mem {
|
||||
|
||||
template<typename K, typename V, uint32_t (*HashFunction)(K key), bool (*EqualFn)(K a, K b)>
|
||||
class HashMap {
|
||||
public:
|
||||
void init(Allocator& allocator, int capacity) {
|
||||
init_capacity(allocator, capacity);
|
||||
}
|
||||
void deinit(Allocator& allocator) {
|
||||
allocator.deallocate(_entries, _capacity);
|
||||
}
|
||||
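//
// usage sketch (illustrative only; my_hash/my_eql stand in for any hash and equality
// functions matching the template parameters, and the keys/values are placeholders):
//
//   mem::HashMap<Buf *, ZigType *, my_hash, my_eql> map;
//   map.init(heap::c_allocator, 8);
//   map.put(heap::c_allocator, name, type);   // may grow, so the allocator is passed again
//   auto *entry = map.maybe_get(name);        // nullptr when absent
//   map.deinit(heap::c_allocator);
//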
|
||||
struct Entry {
|
||||
K key;
|
||||
V value;
|
||||
bool used;
|
||||
int distance_from_start_index;
|
||||
};
|
||||
|
||||
void clear() {
|
||||
for (int i = 0; i < _capacity; i += 1) {
|
||||
_entries[i].used = false;
|
||||
}
|
||||
_size = 0;
|
||||
_max_distance_from_start_index = 0;
|
||||
_modification_count += 1;
|
||||
}
|
||||
|
||||
int size() const {
|
||||
return _size;
|
||||
}
|
||||
|
||||
void put(Allocator& allocator, const K &key, const V &value) {
|
||||
_modification_count += 1;
|
||||
internal_put(key, value);
|
||||
|
||||
// if we get too full (60%), double the capacity
|
||||
if (_size * 5 >= _capacity * 3) {
|
||||
Entry *old_entries = _entries;
|
||||
int old_capacity = _capacity;
|
||||
init_capacity(allocator, _capacity * 2);
|
||||
// dump all of the old elements into the new table
|
||||
for (int i = 0; i < old_capacity; i += 1) {
|
||||
Entry *old_entry = &old_entries[i];
|
||||
if (old_entry->used)
|
||||
internal_put(old_entry->key, old_entry->value);
|
||||
}
|
||||
allocator.deallocate(old_entries, old_capacity);
|
||||
}
|
||||
}
|
||||
|
||||
Entry *put_unique(Allocator& allocator, const K &key, const V &value) {
|
||||
// TODO make this more efficient
|
||||
Entry *entry = internal_get(key);
|
||||
if (entry)
|
||||
return entry;
|
||||
put(allocator, key, value);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
const V &get(const K &key) const {
|
||||
Entry *entry = internal_get(key);
|
||||
if (!entry)
|
||||
zig_panic("key not found");
|
||||
return entry->value;
|
||||
}
|
||||
|
||||
Entry *maybe_get(const K &key) const {
|
||||
return internal_get(key);
|
||||
}
|
||||
|
||||
void maybe_remove(const K &key) {
|
||||
if (maybe_get(key)) {
|
||||
remove(key);
|
||||
}
|
||||
}
|
||||
|
||||
void remove(const K &key) {
|
||||
_modification_count += 1;
|
||||
int start_index = key_to_index(key);
|
||||
for (int roll_over = 0; roll_over <= _max_distance_from_start_index; roll_over += 1) {
|
||||
int index = (start_index + roll_over) % _capacity;
|
||||
Entry *entry = &_entries[index];
|
||||
|
||||
if (!entry->used)
|
||||
zig_panic("key not found");
|
||||
|
||||
if (!EqualFn(entry->key, key))
|
||||
continue;
|
||||
|
||||
for (; roll_over < _capacity; roll_over += 1) {
|
||||
int next_index = (start_index + roll_over + 1) % _capacity;
|
||||
Entry *next_entry = &_entries[next_index];
|
||||
if (!next_entry->used || next_entry->distance_from_start_index == 0) {
|
||||
entry->used = false;
|
||||
_size -= 1;
|
||||
return;
|
||||
}
|
||||
*entry = *next_entry;
|
||||
entry->distance_from_start_index -= 1;
|
||||
entry = next_entry;
|
||||
}
|
||||
zig_panic("shifting everything in the table");
|
||||
}
|
||||
zig_panic("key not found");
|
||||
}
|
||||
|
||||
class Iterator {
|
||||
public:
|
||||
Entry *next() {
|
||||
if (_inital_modification_count != _table->_modification_count)
|
||||
zig_panic("concurrent modification");
|
||||
if (_count >= _table->size())
|
||||
return NULL;
|
||||
for (; _index < _table->_capacity; _index += 1) {
|
||||
Entry *entry = &_table->_entries[_index];
|
||||
if (entry->used) {
|
||||
_index += 1;
|
||||
_count += 1;
|
||||
return entry;
|
||||
}
|
||||
}
|
||||
zig_panic("no next item");
|
||||
}
|
||||
|
||||
private:
|
||||
const HashMap * _table;
|
||||
// how many items have we returned
|
||||
int _count = 0;
|
||||
// iterator through the entry array
|
||||
int _index = 0;
|
||||
// used to detect concurrent modification
|
||||
uint32_t _inital_modification_count;
|
||||
Iterator(const HashMap * table) :
|
||||
_table(table), _inital_modification_count(table->_modification_count) {
|
||||
}
|
||||
friend HashMap;
|
||||
};
|
||||
|
||||
// you must not modify the underlying HashMap while this iterator is still in use
|
||||
Iterator entry_iterator() const {
|
||||
return Iterator(this);
|
||||
}
|
||||
|
||||
private:
|
||||
Entry *_entries;
|
||||
int _capacity;
|
||||
int _size;
|
||||
int _max_distance_from_start_index;
|
||||
// this is used to detect bugs where a hashtable is edited while an iterator is running.
|
||||
uint32_t _modification_count;
|
||||
|
||||
void init_capacity(Allocator& allocator, int capacity) {
|
||||
_capacity = capacity;
|
||||
_entries = allocator.allocate<Entry>(_capacity);
|
||||
_size = 0;
|
||||
_max_distance_from_start_index = 0;
|
||||
for (int i = 0; i < _capacity; i += 1) {
|
||||
_entries[i].used = false;
|
||||
}
|
||||
}
|
||||
|
||||
void internal_put(K key, V value) {
|
||||
int start_index = key_to_index(key);
|
||||
for (int roll_over = 0, distance_from_start_index = 0;
|
||||
roll_over < _capacity; roll_over += 1, distance_from_start_index += 1)
|
||||
{
|
||||
int index = (start_index + roll_over) % _capacity;
|
||||
Entry *entry = &_entries[index];
|
||||
|
||||
if (entry->used && !EqualFn(entry->key, key)) {
|
||||
if (entry->distance_from_start_index < distance_from_start_index) {
|
||||
// robin hood to the rescue
|
||||
Entry tmp = *entry;
|
||||
if (distance_from_start_index > _max_distance_from_start_index)
|
||||
_max_distance_from_start_index = distance_from_start_index;
|
||||
*entry = {
|
||||
key,
|
||||
value,
|
||||
true,
|
||||
distance_from_start_index,
|
||||
};
|
||||
key = tmp.key;
|
||||
value = tmp.value;
|
||||
distance_from_start_index = tmp.distance_from_start_index;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!entry->used) {
|
||||
// adding an entry. otherwise overwriting old value with
|
||||
// same key
|
||||
_size += 1;
|
||||
}
|
||||
|
||||
if (distance_from_start_index > _max_distance_from_start_index)
|
||||
_max_distance_from_start_index = distance_from_start_index;
|
||||
*entry = {
|
||||
key,
|
||||
value,
|
||||
true,
|
||||
distance_from_start_index,
|
||||
};
|
||||
return;
|
||||
}
|
||||
zig_panic("put into a full HashMap");
|
||||
}
|
||||
|
||||
|
||||
Entry *internal_get(const K &key) const {
|
||||
int start_index = key_to_index(key);
|
||||
for (int roll_over = 0; roll_over <= _max_distance_from_start_index; roll_over += 1) {
|
||||
int index = (start_index + roll_over) % _capacity;
|
||||
Entry *entry = &_entries[index];
|
||||
|
||||
if (!entry->used)
|
||||
return NULL;
|
||||
|
||||
if (EqualFn(entry->key, key))
|
||||
return entry;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int key_to_index(const K &key) const {
|
||||
return (int)(HashFunction(key) % ((uint32_t)_capacity));
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace mem
|
||||
|
||||
#endif
|
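A small driver sketch (not part of the diff) for the explicit-allocator HashMap above; the integer hash and equality helpers are made up for the example.

#include "mem_hash_map.hpp"
#include "heap.hpp"

uint32_t int_hash(int k) { return (uint32_t)k * 2654435761u; }
bool int_eql(int a, int b) { return a == b; }

void hash_map_demo() {
    mem::HashMap<int, const char *, int_hash, int_eql> map;
    map.init(heap::c_allocator, 8);
    map.put(heap::c_allocator, 42, "answer");   // may grow; growth uses the passed allocator
    if (auto *entry = map.maybe_get(42)) {
        (void)entry->value;                     // "answer"
    }
    map.remove(42);
    map.deinit(heap::c_allocator);
}

The same container can be backed by the bootstrap or arena allocators because the allocator is a parameter of every mutating call rather than a stored member.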
101 src/mem_list.hpp (new file)
@ -0,0 +1,101 @@
/*
 * Copyright (c) 2015 Andrew Kelley
 *
 * This file is part of zig, which is MIT licensed.
 * See http://opensource.org/licenses/MIT
 */

#ifndef ZIG_MEM_LIST_HPP
#define ZIG_MEM_LIST_HPP

#include "mem.hpp"

namespace mem {

template<typename T>
struct List {
    void deinit(Allocator& allocator) {
        allocator.deallocate<T>(items, capacity);
    }

    void append(Allocator& allocator, const T& item) {
        ensure_capacity(allocator, length + 1);
        items[length++] = item;
    }

    // remember that the pointer to this item is invalid after you
    // modify the length of the list
    const T & at(size_t index) const {
        assert(index != SIZE_MAX);
        assert(index < length);
        return items[index];
    }

    T & at(size_t index) {
        assert(index != SIZE_MAX);
        assert(index < length);
        return items[index];
    }

    T pop() {
        assert(length >= 1);
        return items[--length];
    }
    T *add_one(Allocator& allocator) {
        resize(allocator, length + 1);
        return &last();
    }

    const T & last() const {
        assert(length >= 1);
        return items[length - 1];
    }

    T & last() {
        assert(length >= 1);
        return items[length - 1];
    }

    void resize(Allocator& allocator, size_t new_length) {
        assert(new_length != SIZE_MAX);
        ensure_capacity(allocator, new_length);
        length = new_length;
    }

    void clear() {
        length = 0;
    }

    void ensure_capacity(Allocator& allocator, size_t new_capacity) {
        if (capacity >= new_capacity)
            return;

        size_t better_capacity = capacity;
        do {
            better_capacity = better_capacity * 5 / 2 + 8;
        } while (better_capacity < new_capacity);

        items = allocator.reallocate_nonzero<T>(items, capacity, better_capacity);
        capacity = better_capacity;
    }

    T swap_remove(size_t index) {
        if (length - 1 == index) return pop();

        assert(index != SIZE_MAX);
        assert(index < length);

        T old_item = items[index];
        items[index] = pop();
        return old_item;
    }

    T *items{nullptr};
    size_t length{0};
    size_t capacity{0};
};

} // namespace mem

#endif
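A corresponding sketch (not part of the diff) for mem::List; every growing operation takes the Allocator so the container itself stores no allocator state.

#include "mem_list.hpp"
#include "heap.hpp"

void list_demo() {
    mem::List<int> list = {};
    for (int i = 0; i < 10; i += 1)
        list.append(heap::c_allocator, i);   // grows capacity as needed
    int last = list.pop();                   // 9
    (void)last;
    list.deinit(heap::c_allocator);          // free with the same allocator that grew it
}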
181 src/mem_profile.cpp (new file)
@ -0,0 +1,181 @@
/*
 * Copyright (c) 2020 Andrew Kelley
 *
 * This file is part of zig, which is MIT licensed.
 * See http://opensource.org/licenses/MIT
 */

#include "config.h"

#ifdef ZIG_ENABLE_MEM_PROFILE

#include "mem.hpp"
#include "mem_list.hpp"
#include "mem_profile.hpp"
#include "heap.hpp"

namespace mem {

void Profile::init(const char *name, const char *kind) {
    this->name = name;
    this->kind = kind;
    this->usage_table.init(heap::bootstrap_allocator, 1024);
}

void Profile::deinit() {
    assert(this->name != nullptr);
    if (mem::report_print)
        this->print_report();
    this->usage_table.deinit(heap::bootstrap_allocator);
    this->name = nullptr;
}

void Profile::record_alloc(const TypeInfo &info, size_t count) {
    if (count == 0) return;
    auto existing_entry = this->usage_table.put_unique(
        heap::bootstrap_allocator,
        UsageKey{info.name_ptr, info.name_len},
        Entry{info, 1, count, 0, 0} );
    if (existing_entry != nullptr) {
        assert(existing_entry->value.info.size == info.size); // allocated name does not match type
        existing_entry->value.alloc.calls += 1;
        existing_entry->value.alloc.objects += count;
    }
}

void Profile::record_dealloc(const TypeInfo &info, size_t count) {
    if (count == 0) return;
    auto existing_entry = this->usage_table.maybe_get(UsageKey{info.name_ptr, info.name_len});
    if (existing_entry == nullptr) {
        fprintf(stderr, "deallocated name '");
        for (size_t i = 0; i < info.name_len; ++i)
            fputc(info.name_ptr[i], stderr);
        zig_panic("' (size %zu) not found in allocated table; compromised memory usage stats", info.size);
    }
    if (existing_entry->value.info.size != info.size) {
        fprintf(stderr, "deallocated name '");
        for (size_t i = 0; i < info.name_len; ++i)
            fputc(info.name_ptr[i], stderr);
        zig_panic("' does not match expected type size %zu", info.size);
    }
    assert(existing_entry->value.alloc.calls - existing_entry->value.dealloc.calls > 0);
    assert(existing_entry->value.alloc.objects - existing_entry->value.dealloc.objects >= count);
    existing_entry->value.dealloc.calls += 1;
    existing_entry->value.dealloc.objects += count;
}

static size_t entry_remain_total_bytes(const Profile::Entry *entry) {
    return (entry->alloc.objects - entry->dealloc.objects) * entry->info.size;
}

static int entry_compare(const void *a, const void *b) {
    size_t total_a = entry_remain_total_bytes(*reinterpret_cast<Profile::Entry *const *>(a));
    size_t total_b = entry_remain_total_bytes(*reinterpret_cast<Profile::Entry *const *>(b));
    if (total_a > total_b)
        return -1;
    if (total_a < total_b)
        return 1;
    return 0;
};

void Profile::print_report(FILE *file) {
    if (!file) {
        file = report_file;
        if (!file)
            file = stderr;
    }
    fprintf(file, "\n--- MEMORY PROFILE REPORT [%s]: %s ---\n", this->kind, this->name);

    List<const Entry *> list;
    auto it = this->usage_table.entry_iterator();
    for (;;) {
        auto entry = it.next();
        if (!entry)
            break;
        list.append(heap::bootstrap_allocator, &entry->value);
    }

    qsort(list.items, list.length, sizeof(const Entry *), entry_compare);

    size_t total_bytes_alloc = 0;
    size_t total_bytes_dealloc = 0;

    size_t total_calls_alloc = 0;
    size_t total_calls_dealloc = 0;

    for (size_t i = 0; i < list.length; i += 1) {
        const Entry *entry = list.at(i);
        fprintf(file, " ");
        for (size_t j = 0; j < entry->info.name_len; ++j)
            fputc(entry->info.name_ptr[j], file);
        fprintf(file, ": %zu bytes each", entry->info.size);

        fprintf(file, ", alloc{ %zu calls, %zu objects, total ", entry->alloc.calls, entry->alloc.objects);
        const auto alloc_num_bytes = entry->alloc.objects * entry->info.size;
        zig_pretty_print_bytes(file, alloc_num_bytes);

        fprintf(file, " }, dealloc{ %zu calls, %zu objects, total ", entry->dealloc.calls, entry->dealloc.objects);
        const auto dealloc_num_bytes = entry->dealloc.objects * entry->info.size;
        zig_pretty_print_bytes(file, dealloc_num_bytes);

        fprintf(file, " }, remain{ %zu calls, %zu objects, total ",
            entry->alloc.calls - entry->dealloc.calls,
            entry->alloc.objects - entry->dealloc.objects );
        const auto remain_num_bytes = alloc_num_bytes - dealloc_num_bytes;
        zig_pretty_print_bytes(file, remain_num_bytes);

        fprintf(file, " }\n");

        total_bytes_alloc += alloc_num_bytes;
        total_bytes_dealloc += dealloc_num_bytes;

        total_calls_alloc += entry->alloc.calls;
        total_calls_dealloc += entry->dealloc.calls;
    }

    fprintf(file, "\n Total bytes allocated: ");
    zig_pretty_print_bytes(file, total_bytes_alloc);
    fprintf(file, ", deallocated: ");
    zig_pretty_print_bytes(file, total_bytes_dealloc);
    fprintf(file, ", remaining: ");
    zig_pretty_print_bytes(file, total_bytes_alloc - total_bytes_dealloc);

    fprintf(file, "\n Total calls alloc: %zu, dealloc: %zu, remain: %zu\n",
        total_calls_alloc, total_calls_dealloc, (total_calls_alloc - total_calls_dealloc));

    list.deinit(heap::bootstrap_allocator);
}

uint32_t Profile::usage_hash(UsageKey key) {
    // FNV 32-bit hash
    uint32_t h = 2166136261;
    for (size_t i = 0; i < key.name_len; ++i) {
        h = h ^ key.name_ptr[i];
        h = h * 16777619;
    }
    return h;
}

bool Profile::usage_equal(UsageKey a, UsageKey b) {
    return memcmp(a.name_ptr, b.name_ptr, a.name_len > b.name_len ? a.name_len : b.name_len) == 0;
}

void InternCounters::print_report(FILE *file) {
    if (!file) {
        file = report_file;
        if (!file)
            file = stderr;
    }
    fprintf(file, "\n--- IR INTERNING REPORT ---\n");
    fprintf(file, " undefined: interned %zu times\n", intern_counters.x_undefined);
    fprintf(file, " void: interned %zu times\n", intern_counters.x_void);
    fprintf(file, " null: interned %zu times\n", intern_counters.x_null);
    fprintf(file, " unreachable: interned %zu times\n", intern_counters.x_unreachable);
    fprintf(file, " zero_byte: interned %zu times\n", intern_counters.zero_byte);
}

InternCounters intern_counters;

} // namespace mem

#endif
71 src/mem_profile.hpp (new file)
@ -0,0 +1,71 @@
/*
 * Copyright (c) 2020 Andrew Kelley
 *
 * This file is part of zig, which is MIT licensed.
 * See http://opensource.org/licenses/MIT
 */

#ifndef ZIG_MEM_PROFILE_HPP
#define ZIG_MEM_PROFILE_HPP

#include "config.h"

#ifdef ZIG_ENABLE_MEM_PROFILE

#include <stdio.h>

#include "mem.hpp"
#include "mem_hash_map.hpp"
#include "util.hpp"

namespace mem {

struct Profile {
    void init(const char *name, const char *kind);
    void deinit();

    void record_alloc(const TypeInfo &info, size_t count);
    void record_dealloc(const TypeInfo &info, size_t count);

    void print_report(FILE *file = nullptr);

    struct Entry {
        TypeInfo info;

        struct Use {
            size_t calls;
            size_t objects;
        } alloc, dealloc;
    };

private:
    const char *name;
    const char *kind;

    struct UsageKey {
        const char *name_ptr;
        size_t name_len;
    };

    static uint32_t usage_hash(UsageKey key);
    static bool usage_equal(UsageKey a, UsageKey b);

    HashMap<UsageKey, Entry, usage_hash, usage_equal> usage_table;
};

struct InternCounters {
    size_t x_undefined;
    size_t x_void;
    size_t x_null;
    size_t x_unreachable;
    size_t zero_byte;

    void print_report(FILE *file = nullptr);
};

extern InternCounters intern_counters;

} // namespace mem

#endif
#endif
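A lifecycle sketch (not part of the diff) for a Profile instance, only meaningful when ZIG_ENABLE_MEM_PROFILE is defined; the profiled allocators presumably make equivalent record_alloc/record_dealloc calls internally, and profile_demo is invented for illustration.

#include <stdio.h>
#include "mem_profile.hpp"

void profile_demo() {
    mem::Profile profile;
    profile.init("demo", "CAllocator");

    // record a pretend allocation and deallocation of 4 ints, keyed by type name
    const mem::TypeInfo info = mem::TypeInfo::make<int>();
    profile.record_alloc(info, 4);
    profile.record_dealloc(info, 4);

    profile.print_report(stderr);  // with no argument, falls back to mem::report_file / stderr
    profile.deinit();              // prints again if mem::report_print is set
}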
136 src/mem_type_info.hpp (new file)
@ -0,0 +1,136 @@
/*
 * Copyright (c) 2020 Andrew Kelley
 *
 * This file is part of zig, which is MIT licensed.
 * See http://opensource.org/licenses/MIT
 */

#ifndef ZIG_MEM_TYPE_INFO_HPP
#define ZIG_MEM_TYPE_INFO_HPP

#include "config.h"

#ifndef ZIG_TYPE_INFO_IMPLEMENTATION
# ifdef ZIG_ENABLE_MEM_PROFILE
#  define ZIG_TYPE_INFO_IMPLEMENTATION 1
# else
#  define ZIG_TYPE_INFO_IMPLEMENTATION 0
# endif
#endif

namespace mem {

#if ZIG_TYPE_INFO_IMPLEMENTATION == 0

struct TypeInfo {
    size_t size;
    size_t alignment;

    template <typename T>
    static constexpr TypeInfo make() {
        return {sizeof(T), alignof(T)};
    }
};

#elif ZIG_TYPE_INFO_IMPLEMENTATION == 1

//
// A non-portable way to get a human-readable type-name compatible with
// non-RTTI C++ compiler mode; eg. `-fno-rtti`.
//
// Minimum requirements are c++11 and a compiler that has a constant for the
// current function's decorated name whereby a template-type name can be
// computed. eg. `__PRETTY_FUNCTION__` or `__FUNCSIG__`.
//
// given the following snippet:
//
// | #include <stdio.h>
// |
// | struct Top {};
// | namespace mynamespace {
// |     using custom = unsigned int;
// |     struct Foo {
// |         struct Bar {};
// |     };
// | };
// |
// | template <typename T>
// | void foobar() {
// | #ifdef _MSC_VER
// |     fprintf(stderr, "--> %s\n", __FUNCSIG__);
// | #else
// |     fprintf(stderr, "--> %s\n", __PRETTY_FUNCTION__);
// | #endif
// | }
// |
// | int main() {
// |     foobar<Top>();
// |     foobar<unsigned int>();
// |     foobar<mynamespace::custom>();
// |     foobar<mynamespace::Foo*>();
// |     foobar<mynamespace::Foo::Bar*>();
// | }
//
// gcc 9.2.0 produces:
// --> void foobar() [with T = Top]
// --> void foobar() [with T = unsigned int]
// --> void foobar() [with T = unsigned int]
// --> void foobar() [with T = mynamespace::Foo*]
// --> void foobar() [with T = mynamespace::Foo::Bar*]
//
// xcode 11.3.1/clang produces:
// --> void foobar() [T = Top]
// --> void foobar() [T = unsigned int]
// --> void foobar() [T = unsigned int]
// --> void foobar() [T = mynamespace::Foo *]
// --> void foobar() [T = mynamespace::Foo::Bar *]
//
// VStudio 2019 16.5.0/msvc produces:
// --> void __cdecl foobar<struct Top>(void)
// --> void __cdecl foobar<unsigned int>(void)
// --> void __cdecl foobar<unsigned int>(void)
// --> void __cdecl foobar<struct mynamespace::Foo*>(void)
// --> void __cdecl foobar<struct mynamespace::Foo::Bar*>(void)
//
struct TypeInfo {
    const char *name_ptr;
    size_t name_len;
    size_t size;
    size_t alignment;

    static constexpr TypeInfo to_type_info(const char *str, size_t start, size_t end, size_t size, size_t alignment) {
        return TypeInfo{str + start, end - start, size, alignment};
    }

    static constexpr size_t index_of(const char *str, char c) {
        return *str == c ? 0 : 1 + index_of(str + 1, c);
    }

    template <typename T>
    static constexpr const char *decorated_name() {
#ifdef _MSC_VER
        return __FUNCSIG__;
#else
        return __PRETTY_FUNCTION__;
#endif
    }

    static constexpr TypeInfo extract(const char *decorated, size_t size, size_t alignment) {
#ifdef _MSC_VER
        return to_type_info(decorated, index_of(decorated, '<') + 1, index_of(decorated, '>'), size, alignment);
#else
        return to_type_info(decorated, index_of(decorated, '=') + 2, index_of(decorated, ']'), size, alignment);
#endif
    }

    template <typename T>
    static constexpr TypeInfo make() {
        return TypeInfo::extract(TypeInfo::decorated_name<T>(), sizeof(T), alignof(T));
    }
};

#endif // ZIG_TYPE_INFO_IMPLEMENTATION

} // namespace mem

#endif
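A quick illustration (not part of the diff) of what TypeInfo::make<T>() yields under the two implementations; Foo and type_info_demo are invented for the example.

#include <stdio.h>
#include "mem_type_info.hpp"

struct Foo { int a; double b; };

void type_info_demo() {
    const mem::TypeInfo info = mem::TypeInfo::make<Foo>();
#if ZIG_TYPE_INFO_IMPLEMENTATION == 1
    // name sliced out of __PRETTY_FUNCTION__ / __FUNCSIG__ at compile time, no RTTI needed
    fprintf(stderr, "%.*s: size=%zu align=%zu\n",
            (int)info.name_len, info.name_ptr, info.size, info.alignment);
#else
    // fallback implementation carries only size and alignment
    fprintf(stderr, "size=%zu align=%zu\n", info.size, info.alignment);
#endif
}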
150 src/memory_profiling.cpp (file deleted)
@ -1,150 +0,0 @@
#include "memory_profiling.hpp"
#include "hash_map.hpp"
#include "list.hpp"
#include "util.hpp"
#include <string.h>

#ifdef ZIG_ENABLE_MEM_PROFILE

MemprofInternCount memprof_intern_count;

static bool str_eql_str(const char *a, const char *b) {
    return strcmp(a, b) == 0;
}

static uint32_t str_hash(const char *s) {
    // FNV 32-bit hash
    uint32_t h = 2166136261;
    for (; *s; s += 1) {
        h = h ^ *s;
        h = h * 16777619;
    }
    return h;
}

struct CountAndSize {
    size_t item_count;
    size_t type_size;
};

ZigList<const char *> unknown_names = {};
HashMap<const char *, CountAndSize, str_hash, str_eql_str> usage_table = {};
bool table_active = false;

static const char *get_default_name(const char *name_or_null, size_t type_size) {
    if (name_or_null != nullptr) return name_or_null;
    if (type_size >= unknown_names.length) {
        table_active = false;
        while (type_size >= unknown_names.length) {
            unknown_names.append(nullptr);
        }
        table_active = true;
    }
    if (unknown_names.at(type_size) == nullptr) {
        char buf[100];
        sprintf(buf, "Unknown_%zu%c", type_size, 0);
        unknown_names.at(type_size) = strdup(buf);
    }
    return unknown_names.at(type_size);
}

void memprof_alloc(const char *name, size_t count, size_t type_size) {
    if (!table_active) return;
    if (count == 0) return;
    // temporarily disable during table put
    table_active = false;
    name = get_default_name(name, type_size);
    auto existing_entry = usage_table.put_unique(name, {count, type_size});
    if (existing_entry != nullptr) {
        assert(existing_entry->value.type_size == type_size); // allocated name does not match type
        existing_entry->value.item_count += count;
    }
    table_active = true;
}

void memprof_dealloc(const char *name, size_t count, size_t type_size) {
    if (!table_active) return;
    if (count == 0) return;
    name = get_default_name(name, type_size);
    auto existing_entry = usage_table.maybe_get(name);
    if (existing_entry == nullptr) {
        zig_panic("deallocated name '%s' (size %zu) not found in allocated table; compromised memory usage stats",
                name, type_size);
    }
    if (existing_entry->value.type_size != type_size) {
        zig_panic("deallocated name '%s' does not match expected type size %zu", name, type_size);
    }
    existing_entry->value.item_count -= count;
}

void memprof_init(void) {
    usage_table.init(1024);
    table_active = true;
}

struct MemItem {
    const char *type_name;
    CountAndSize count_and_size;
};

static size_t get_bytes(const MemItem *item) {
    return item->count_and_size.item_count * item->count_and_size.type_size;
}

static int compare_bytes_desc(const void *a, const void *b) {
    size_t size_a = get_bytes((const MemItem *)(a));
    size_t size_b = get_bytes((const MemItem *)(b));
    if (size_a > size_b)
        return -1;
    if (size_a < size_b)
        return 1;
    return 0;
}

void memprof_dump_stats(FILE *file) {
    assert(table_active);
    // disable modifications from this function
    table_active = false;

    ZigList<MemItem> list = {};

    auto it = usage_table.entry_iterator();
    for (;;) {
        auto *entry = it.next();
        if (!entry)
            break;

        list.append({entry->key, entry->value});
    }

    qsort(list.items, list.length, sizeof(MemItem), compare_bytes_desc);

    size_t total_bytes_used = 0;

    for (size_t i = 0; i < list.length; i += 1) {
        const MemItem *item = &list.at(i);
        fprintf(file, "%s: %zu items, %zu bytes each, total ", item->type_name,
                item->count_and_size.item_count, item->count_and_size.type_size);
        size_t bytes = get_bytes(item);
        zig_pretty_print_bytes(file, bytes);
        fprintf(file, "\n");

        total_bytes_used += bytes;
    }

    fprintf(stderr, "Total bytes used: ");
    zig_pretty_print_bytes(file, total_bytes_used);
    fprintf(file, "\n");

    list.deinit();
    table_active = true;

    fprintf(stderr, "\n");
    fprintf(stderr, "undefined: interned %zu times\n", memprof_intern_count.x_undefined);
    fprintf(stderr, "void: interned %zu times\n", memprof_intern_count.x_void);
    fprintf(stderr, "null: interned %zu times\n", memprof_intern_count.x_null);
    fprintf(stderr, "unreachable: interned %zu times\n", memprof_intern_count.x_unreachable);
    fprintf(stderr, "zero_byte: interned %zu times\n", memprof_intern_count.zero_byte);
}

#endif
31 src/memory_profiling.hpp (file deleted)
@ -1,31 +0,0 @@
/*
 * Copyright (c) 2019 Andrew Kelley
 *
 * This file is part of zig, which is MIT licensed.
 * See http://opensource.org/licenses/MIT
 */

#ifndef ZIG_MEMORY_PROFILING_HPP
#define ZIG_MEMORY_PROFILING_HPP

#include "config.h"

#include <stddef.h>
#include <stdio.h>

struct MemprofInternCount {
    size_t x_undefined;
    size_t x_void;
    size_t x_null;
    size_t x_unreachable;
    size_t zero_byte;
};
extern MemprofInternCount memprof_intern_count;

void memprof_init(void);

void memprof_alloc(const char *name, size_t item_count, size_t type_size);
void memprof_dealloc(const char *name, size_t item_count, size_t type_size);

void memprof_dump_stats(FILE *file);
#endif
14 src/os.cpp
@ -107,7 +107,7 @@ static void populate_termination(Termination *term, int status) {
}

static void os_spawn_process_posix(ZigList<const char *> &args, Termination *term) {
    const char **argv = allocate<const char *>(args.length + 1);
    const char **argv = heap::c_allocator.allocate<const char *>(args.length + 1);
    for (size_t i = 0; i < args.length; i += 1) {
        argv[i] = args.at(i);
    }
@ -688,7 +688,7 @@ static Buf os_path_resolve_posix(Buf **paths_ptr, size_t paths_len) {

    if (have_abs) {
        result_len = max_size;
        result_ptr = allocate_nonzero<uint8_t>(result_len);
        result_ptr = heap::c_allocator.allocate_nonzero<uint8_t>(result_len);
    } else {
        Buf cwd = BUF_INIT;
        int err;
@ -696,7 +696,7 @@ static Buf os_path_resolve_posix(Buf **paths_ptr, size_t paths_len) {
            zig_panic("get cwd failed");
        }
        result_len = max_size + buf_len(&cwd) + 1;
        result_ptr = allocate_nonzero<uint8_t>(result_len);
        result_ptr = heap::c_allocator.allocate_nonzero<uint8_t>(result_len);
        memcpy(result_ptr, buf_ptr(&cwd), buf_len(&cwd));
        result_index += buf_len(&cwd);
    }
@ -816,7 +816,7 @@ static Error os_exec_process_posix(ZigList<const char *> &args,
    if (dup2(stderr_pipe[1], STDERR_FILENO) == -1)
        zig_panic("dup2 failed");

    const char **argv = allocate<const char *>(args.length + 1);
    const char **argv = heap::c_allocator.allocate<const char *>(args.length + 1);
    argv[args.length] = nullptr;
    for (size_t i = 0; i < args.length; i += 1) {
        argv[i] = args.at(i);
@ -1134,7 +1134,7 @@ static bool is_stderr_cyg_pty(void) {
    if (stderr_handle == INVALID_HANDLE_VALUE)
        return false;

    int size = sizeof(FILE_NAME_INFO) + sizeof(WCHAR) * MAX_PATH;
    const int size = sizeof(FILE_NAME_INFO) + sizeof(WCHAR) * MAX_PATH;
    FILE_NAME_INFO *nameinfo;
    WCHAR *p = NULL;

@ -1142,7 +1142,7 @@ static bool is_stderr_cyg_pty(void) {
    if (GetFileType(stderr_handle) != FILE_TYPE_PIPE) {
        return 0;
    }
    nameinfo = (FILE_NAME_INFO *)allocate<char>(size);
    nameinfo = reinterpret_cast<FILE_NAME_INFO *>(heap::c_allocator.allocate<char>(size));
    if (nameinfo == NULL) {
        return 0;
    }
@ -1179,7 +1179,7 @@ static bool is_stderr_cyg_pty(void) {
        }
    }
    free(nameinfo);
    heap::c_allocator.deallocate(reinterpret_cast<char *>(nameinfo), size);
    return (p != NULL);
}
#endif
src/parser.cpp
@ -147,7 +147,7 @@ static void ast_invalid_token_error(ParseContext *pc, Token *token) {
}

static AstNode *ast_create_node_no_line_info(ParseContext *pc, NodeType type) {
    AstNode *node = allocate<AstNode>(1, "AstNode");
    AstNode *node = heap::c_allocator.create<AstNode>();
    node->type = type;
    node->owner = pc->owner;
    return node;
@ -1966,7 +1966,7 @@ static AsmOutput *ast_parse_asm_output_item(ParseContext *pc) {

    expect_token(pc, TokenIdRParen);

    AsmOutput *res = allocate<AsmOutput>(1);
    AsmOutput *res = heap::c_allocator.create<AsmOutput>();
    res->asm_symbolic_name = token_buf(sym_name);
    res->constraint = token_buf(str);
    res->variable_name = token_buf(var_name);
@ -2003,7 +2003,7 @@ static AsmInput *ast_parse_asm_input_item(ParseContext *pc) {
    AstNode *expr = ast_expect(pc, ast_parse_expr);
    expect_token(pc, TokenIdRParen);

    AsmInput *res = allocate<AsmInput>(1);
    AsmInput *res = heap::c_allocator.create<AsmInput>();
    res->asm_symbolic_name = token_buf(sym_name);
    res->constraint = token_buf(constraint);
    res->expr = expr;
src/target.cpp
@ -520,7 +520,7 @@ void get_native_target(ZigTarget *target) {
        target->abi = target_default_abi(target->arch, target->os);
    }
    if (target_is_glibc(target)) {
        target->glibc_version = allocate<ZigGLibCVersion>(1);
        target->glibc_version = heap::c_allocator.create<ZigGLibCVersion>();
        target_init_default_glibc_version(target);
#ifdef ZIG_OS_LINUX
        Error err;
src/tokenizer.cpp
@ -397,10 +397,10 @@ static void invalid_char_error(Tokenize *t, uint8_t c) {
void tokenize(Buf *buf, Tokenization *out) {
    Tokenize t = {0};
    t.out = out;
    t.tokens = out->tokens = allocate<ZigList<Token>>(1);
    t.tokens = out->tokens = heap::c_allocator.create<ZigList<Token>>();
    t.buf = buf;

    out->line_offsets = allocate<ZigList<size_t>>(1);
    out->line_offsets = heap::c_allocator.create<ZigList<size_t>>();
    out->line_offsets->append(0);

    // Skip the UTF-8 BOM if present
@ -101,7 +101,7 @@ Error stage2_cpu_features_parse(struct Stage2CpuFeatures **out, const char *zig_
        const char *cpu_name, const char *cpu_features)
{
    if (zig_triple == nullptr) {
        Stage2CpuFeatures *result = allocate<Stage2CpuFeatures>(1, "Stage2CpuFeatures");
        Stage2CpuFeatures *result = heap::c_allocator.create<Stage2CpuFeatures>();
        result->llvm_cpu_name = ZigLLVMGetHostCPUName();
        result->llvm_cpu_features = ZigLLVMGetNativeFeatures();
        result->builtin_str = "arch.getBaselineCpuFeatures();\n";
@ -110,7 +110,7 @@ Error stage2_cpu_features_parse(struct Stage2CpuFeatures **out, const char *zig_
        return ErrorNone;
    }
    if (cpu_name == nullptr && cpu_features == nullptr) {
        Stage2CpuFeatures *result = allocate<Stage2CpuFeatures>(1, "Stage2CpuFeatures");
        Stage2CpuFeatures *result = heap::c_allocator.create<Stage2CpuFeatures>();
        result->builtin_str = "arch.getBaselineCpuFeatures();\n";
        result->cache_hash = "\n\n";
        *out = result;
132 src/util.hpp
@ -8,69 +8,19 @@
#ifndef ZIG_UTIL_HPP
#define ZIG_UTIL_HPP

#include "memory_profiling.hpp"

#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <assert.h>
#include <ctype.h>

#if defined(_MSC_VER)

#include <intrin.h>

#define ATTRIBUTE_COLD __declspec(noinline)
#define ATTRIBUTE_PRINTF(a, b)
#define ATTRIBUTE_RETURNS_NOALIAS __declspec(restrict)
#define ATTRIBUTE_NORETURN __declspec(noreturn)
#define ATTRIBUTE_MUST_USE

#define BREAKPOINT __debugbreak()

#else

#define ATTRIBUTE_COLD __attribute__((cold))
#define ATTRIBUTE_PRINTF(a, b) __attribute__((format(printf, a, b)))
#define ATTRIBUTE_RETURNS_NOALIAS __attribute__((__malloc__))
#define ATTRIBUTE_NORETURN __attribute__((noreturn))
#define ATTRIBUTE_MUST_USE __attribute__((warn_unused_result))

#if defined(__MINGW32__) || defined(__MINGW64__)
#define BREAKPOINT __debugbreak()
#elif defined(__i386__) || defined(__x86_64__)
#define BREAKPOINT __asm__ volatile("int $0x03");
#elif defined(__clang__)
#define BREAKPOINT __builtin_debugtrap()
#elif defined(__GNUC__)
#define BREAKPOINT __builtin_trap()
#else
#include <signal.h>
#define BREAKPOINT raise(SIGTRAP)
#endif

#endif

ATTRIBUTE_COLD
ATTRIBUTE_NORETURN
ATTRIBUTE_PRINTF(1, 2)
void zig_panic(const char *format, ...);

static inline void zig_assert(bool ok, const char *file, int line, const char *func) {
    if (!ok) {
        zig_panic("Assertion failed at %s:%d in %s. This is a bug in the Zig compiler.", file, line, func);
    }
}

#ifdef _WIN32
#define __func__ __FUNCTION__
#endif

#define zig_unreachable() zig_panic("Unreachable at %s:%d in %s. This is a bug in the Zig compiler.", __FILE__, __LINE__, __func__)

// Assertions in stage1 are always on, and they call zig @panic.
#undef assert
#define assert(ok) zig_assert(ok, __FILE__, __LINE__, __func__)
#include "config.h"
#include "util_base.hpp"
#include "heap.hpp"
#include "mem.hpp"

#if defined(_MSC_VER)
static inline int clzll(unsigned long long mask) {
@ -107,78 +57,6 @@ static inline int ctzll(unsigned long long mask) {
#define ctzll(x) __builtin_ctzll(x)
#endif


template<typename T>
ATTRIBUTE_RETURNS_NOALIAS static inline T *allocate_nonzero(size_t count, const char *name = nullptr) {
#ifdef ZIG_ENABLE_MEM_PROFILE
    memprof_alloc(name, count, sizeof(T));
#endif
#ifndef NDEBUG
    // make behavior when size == 0 portable
    if (count == 0)
        return nullptr;
#endif
    T *ptr = reinterpret_cast<T*>(malloc(count * sizeof(T)));
    if (!ptr)
        zig_panic("allocation failed");
    return ptr;
}

template<typename T>
ATTRIBUTE_RETURNS_NOALIAS static inline T *allocate(size_t count, const char *name = nullptr) {
#ifdef ZIG_ENABLE_MEM_PROFILE
    memprof_alloc(name, count, sizeof(T));
#endif
#ifndef NDEBUG
    // make behavior when size == 0 portable
    if (count == 0)
        return nullptr;
#endif
    T *ptr = reinterpret_cast<T*>(calloc(count, sizeof(T)));
    if (!ptr)
        zig_panic("allocation failed");
    return ptr;
}

template<typename T>
static inline T *reallocate(T *old, size_t old_count, size_t new_count, const char *name = nullptr) {
    T *ptr = reallocate_nonzero(old, old_count, new_count);
    if (new_count > old_count) {
        memset(&ptr[old_count], 0, (new_count - old_count) * sizeof(T));
    }
    return ptr;
}

template<typename T>
static inline T *reallocate_nonzero(T *old, size_t old_count, size_t new_count, const char *name = nullptr) {
#ifdef ZIG_ENABLE_MEM_PROFILE
    memprof_dealloc(name, old_count, sizeof(T));
    memprof_alloc(name, new_count, sizeof(T));
#endif
#ifndef NDEBUG
    // make behavior when size == 0 portable
    if (new_count == 0 && old == nullptr)
        return nullptr;
#endif
    T *ptr = reinterpret_cast<T*>(realloc(old, new_count * sizeof(T)));
    if (!ptr)
        zig_panic("allocation failed");
    return ptr;
}

template<typename T>
static inline void deallocate(T *old, size_t count, const char *name = nullptr) {
#ifdef ZIG_ENABLE_MEM_PROFILE
    memprof_dealloc(name, count, sizeof(T));
#endif
    free(old);
}

template<typename T>
static inline void destroy(T *old, const char *name = nullptr) {
    return deallocate(old, 1, name);
}

template <typename T, size_t n>
constexpr size_t array_length(const T (&)[n]) {
    return n;
@ -293,7 +171,7 @@ struct Slice {
    }

    static inline Slice<T> alloc(size_t n) {
        return {allocate_nonzero<T>(n), n};
        return {heap::c_allocator.allocate_nonzero<T>(n), n};
    }
};

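For orientation, a call-site sketch (not part of the diff) of the migration this hunk implies: the old free functions removed above turn into calls on the global allocator from heap.hpp. Foo and migrate_demo are stand-ins for illustration.

#include "heap.hpp"

struct Foo { int x; };

void migrate_demo() {
    // before: Foo *foo = allocate<Foo>(1, "Foo");   (old util.hpp template, removed above)
    Foo *foo = heap::c_allocator.create<Foo>();
    // before: destroy(foo);
    heap::c_allocator.destroy(foo);
}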
67 src/util_base.hpp (new file)
@ -0,0 +1,67 @@
/*
 * Copyright (c) 2015 Andrew Kelley
 *
 * This file is part of zig, which is MIT licensed.
 * See http://opensource.org/licenses/MIT
 */

#ifndef ZIG_UTIL_BASE_HPP
#define ZIG_UTIL_BASE_HPP

#include <assert.h>

#if defined(_MSC_VER)

#define ATTRIBUTE_COLD __declspec(noinline)
#define ATTRIBUTE_PRINTF(a, b)
#define ATTRIBUTE_RETURNS_NOALIAS __declspec(restrict)
#define ATTRIBUTE_NORETURN __declspec(noreturn)
#define ATTRIBUTE_MUST_USE

#define BREAKPOINT __debugbreak()

#else

#define ATTRIBUTE_COLD __attribute__((cold))
#define ATTRIBUTE_PRINTF(a, b) __attribute__((format(printf, a, b)))
#define ATTRIBUTE_RETURNS_NOALIAS __attribute__((__malloc__))
#define ATTRIBUTE_NORETURN __attribute__((noreturn))
#define ATTRIBUTE_MUST_USE __attribute__((warn_unused_result))

#if defined(__MINGW32__) || defined(__MINGW64__)
#define BREAKPOINT __debugbreak()
#elif defined(__i386__) || defined(__x86_64__)
#define BREAKPOINT __asm__ volatile("int $0x03");
#elif defined(__clang__)
#define BREAKPOINT __builtin_debugtrap()
#elif defined(__GNUC__)
#define BREAKPOINT __builtin_trap()
#else
#include <signal.h>
#define BREAKPOINT raise(SIGTRAP)
#endif

#endif

ATTRIBUTE_COLD
ATTRIBUTE_NORETURN
ATTRIBUTE_PRINTF(1, 2)
void zig_panic(const char *format, ...);

static inline void zig_assert(bool ok, const char *file, int line, const char *func) {
    if (!ok) {
        zig_panic("Assertion failed at %s:%d in %s. This is a bug in the Zig compiler.", file, line, func);
    }
}

#ifdef _WIN32
#define __func__ __FUNCTION__
#endif

#define zig_unreachable() zig_panic("Unreachable at %s:%d in %s. This is a bug in the Zig compiler.", __FILE__, __LINE__, __func__)

// Assertions in stage1 are always on, and they call zig @panic.
#undef assert
#define assert(ok) zig_assert(ok, __FILE__, __LINE__, __func__)

#endif