stage2+stage1: remove type parameter from bit builtins

Closes #12529
Closes #12511
Closes #6835
This commit is contained in:
Veikka Tuominen 2022-08-21 17:24:04 +03:00
parent 6c020cdb76
commit 62ff8871ed
55 changed files with 264 additions and 268 deletions

View File

@ -8031,8 +8031,8 @@ fn func(y: *i32) void {
{#header_close#}
{#header_open|@byteSwap#}
<pre>{#syntax#}@byteSwap(comptime T: type, operand: T) T{#endsyntax#}</pre>
<p>{#syntax#}T{#endsyntax#} must be an integer type with bit count evenly divisible by 8.</p>
<pre>{#syntax#}@byteSwap(operand: anytype) T{#endsyntax#}</pre>
<p>{#syntax#}@TypeOf(operand){#endsyntax#} must be an integer type or an integer vector type with bit count evenly divisible by 8.</p>
<p>{#syntax#}operand{#endsyntax#} may be an {#link|integer|Integers#} or {#link|vector|Vectors#}.</p>
<p>
Swaps the byte order of the integer. This converts a big endian integer to a little endian integer,
@ -8049,8 +8049,8 @@ fn func(y: *i32) void {
{#header_close#}
{#header_open|@bitReverse#}
<pre>{#syntax#}@bitReverse(comptime T: type, integer: T) T{#endsyntax#}</pre>
<p>{#syntax#}T{#endsyntax#} accepts any integer type.</p>
<pre>{#syntax#}@bitReverse(integer: anytype) T{#endsyntax#}</pre>
<p>{#syntax#}@TypeOf(integer){#endsyntax#} accepts any integer type or integer vector type.</p>
<p>
Reverses the bitpattern of an integer value, including the sign bit if applicable.
</p>
@ -8189,8 +8189,8 @@ pub const CallOptions = struct {
{#header_close#}
{#header_open|@clz#}
<pre>{#syntax#}@clz(comptime T: type, operand: T){#endsyntax#}</pre>
<p>{#syntax#}T{#endsyntax#} must be an integer type.</p>
<pre>{#syntax#}@clz(operand: anytype){#endsyntax#}</pre>
<p>{#syntax#}@TypeOf(operand){#endsyntax#} must be an integer type or an integer vector type.</p>
<p>{#syntax#}operand{#endsyntax#} may be an {#link|integer|Integers#} or {#link|vector|Vectors#}.</p>
<p>
This function counts the number of most-significant (leading in a big-Endian sense) zeroes in an integer.
@ -8335,8 +8335,8 @@ test "main" {
{#header_close#}
{#header_open|@ctz#}
<pre>{#syntax#}@ctz(comptime T: type, operand: T){#endsyntax#}</pre>
<p>{#syntax#}T{#endsyntax#} must be an integer type.</p>
<pre>{#syntax#}@ctz(operand: anytype){#endsyntax#}</pre>
<p>{#syntax#}@TypeOf(operand){#endsyntax#} must be an integer type or an integer vector type.</p>
<p>{#syntax#}operand{#endsyntax#} may be an {#link|integer|Integers#} or {#link|vector|Vectors#}.</p>
<p>
This function counts the number of least-significant (trailing in a big-Endian sense) zeroes in an integer.
@ -8972,8 +8972,8 @@ test "@wasmMemoryGrow" {
{#header_close#}
{#header_open|@popCount#}
<pre>{#syntax#}@popCount(comptime T: type, operand: T){#endsyntax#}</pre>
<p>{#syntax#}T{#endsyntax#} must be an integer type.</p>
<pre>{#syntax#}@popCount(operand: anytype){#endsyntax#}</pre>
<p>{#syntax#}@TypeOf(operand){#endsyntax#} must be an integer type or an integer vector type.</p>
<p>{#syntax#}operand{#endsyntax#} may be an {#link|integer|Integers#} or {#link|vector|Vectors#}.</p>
<p>Counts the number of bits set in an integer.</p>
<p>

View File

@ -9,7 +9,7 @@ const normalize = common.normalize;
pub inline fn addf3(comptime T: type, a: T, b: T) T {
const bits = @typeInfo(T).Float.bits;
const Z = std.meta.Int(.unsigned, bits);
const S = std.meta.Int(.unsigned, bits - @clz(Z, @as(Z, bits) - 1));
const S = std.meta.Int(.unsigned, bits - @clz(@as(Z, bits) - 1));
const typeWidth = bits;
const significandBits = math.floatMantissaBits(T);
@ -118,7 +118,7 @@ pub inline fn addf3(comptime T: type, a: T, b: T) T {
// If partial cancellation occurred, we need to left-shift the result
// and adjust the exponent:
if (aSignificand < integerBit << 3) {
const shift = @intCast(i32, @clz(Z, aSignificand)) - @intCast(i32, @clz(std.meta.Int(.unsigned, bits), integerBit << 3));
const shift = @intCast(i32, @clz(aSignificand)) - @intCast(i32, @clz(integerBit << 3));
aSignificand <<= @intCast(S, shift);
aExponent -= shift;
}

View File

@ -192,7 +192,7 @@ pub fn normalize(comptime T: type, significand: *std.meta.Int(.unsigned, @typeIn
const Z = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
const integerBit = @as(Z, 1) << std.math.floatFractionalBits(T);
const shift = @clz(Z, significand.*) - @clz(Z, integerBit);
const shift = @clz(significand.*) - @clz(integerBit);
significand.* <<= @intCast(std.math.Log2Int(Z), shift);
return @as(i32, 1) - shift;
}

View File

@ -56,8 +56,8 @@ pub inline fn extendf(
// a is denormal.
// renormalize the significand and clear the leading bit, then insert
// the correct adjusted exponent in the destination type.
const scale: u32 = @clz(src_rep_t, aAbs) -
@clz(src_rep_t, @as(src_rep_t, srcMinNormal));
const scale: u32 = @clz(aAbs) -
@clz(@as(src_rep_t, srcMinNormal));
absResult = @as(dst_rep_t, aAbs) << @intCast(DstShift, dstSigBits - srcSigBits + scale);
absResult ^= dstMinNormal;
const resultExponent: u32 = dstExpBias - srcExpBias - scale + 1;
@ -119,8 +119,8 @@ pub inline fn extend_f80(comptime src_t: type, a: std.meta.Int(.unsigned, @typeI
// a is denormal.
// renormalize the significand and clear the leading bit, then insert
// the correct adjusted exponent in the destination type.
const scale: u16 = @clz(src_rep_t, a_abs) -
@clz(src_rep_t, @as(src_rep_t, src_min_normal));
const scale: u16 = @clz(a_abs) -
@clz(@as(src_rep_t, src_min_normal));
dst.fraction = @as(u64, a_abs) << @intCast(u6, dst_sig_bits - src_sig_bits + scale);
dst.fraction |= dst_int_bit; // bit 64 is always set for normal numbers

View File

@ -38,7 +38,7 @@ fn __extendxftf2(a: f80) callconv(.C) f128 {
// a is denormal
// renormalize the significand and clear the leading bit and integer part,
// then insert the correct adjusted exponent in the destination type.
const scale: u32 = @clz(u64, a_rep.fraction);
const scale: u32 = @clz(a_rep.fraction);
abs_result = @as(u128, a_rep.fraction) << @intCast(u7, dst_sig_bits - src_sig_bits + scale + 1);
abs_result ^= dst_min_normal;
abs_result |= @as(u128, scale + 1) << dst_sig_bits;

View File

@ -243,7 +243,7 @@ inline fn div_u32(n: u32, d: u32) u32 {
// special cases
if (d == 0) return 0; // ?!
if (n == 0) return 0;
var sr = @bitCast(c_uint, @as(c_int, @clz(u32, d)) - @as(c_int, @clz(u32, n)));
var sr = @bitCast(c_uint, @as(c_int, @clz(d)) - @as(c_int, @clz(n)));
// 0 <= sr <= n_uword_bits - 1 or sr large
if (sr > n_uword_bits - 1) {
// d > r

View File

@ -23,7 +23,7 @@ pub fn intToFloat(comptime T: type, x: anytype) T {
var result: uT = sign_bit;
// Compute significand
var exp = int_bits - @clz(Z, abs_val) - 1;
var exp = int_bits - @clz(abs_val) - 1;
if (int_bits <= fractional_bits or exp <= fractional_bits) {
const shift_amt = fractional_bits - @intCast(math.Log2Int(uT), exp);
@ -32,7 +32,7 @@ pub fn intToFloat(comptime T: type, x: anytype) T {
result ^= implicit_bit; // Remove implicit integer bit
} else {
var shift_amt = @intCast(math.Log2Int(Z), exp - fractional_bits);
const exact_tie: bool = @ctz(Z, abs_val) == shift_amt - 1;
const exact_tie: bool = @ctz(abs_val) == shift_amt - 1;
// Shift down result and remove implicit integer bit
result = @intCast(uT, (abs_val >> (shift_amt - 1))) ^ (implicit_bit << 1);

View File

@ -186,7 +186,7 @@ fn normalize(comptime T: type, significand: *PowerOfTwoSignificandZ(T)) i32 {
const Z = PowerOfTwoSignificandZ(T);
const integerBit = @as(Z, 1) << math.floatFractionalBits(T);
const shift = @clz(Z, significand.*) - @clz(Z, integerBit);
const shift = @clz(significand.*) - @clz(integerBit);
significand.* <<= @intCast(math.Log2Int(Z), shift);
return @as(i32, 1) - shift;
}

View File

@ -75,12 +75,12 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
r[high] = n[high] & (d[high] - 1);
rem.* = @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421
}
return n[high] >> @intCast(Log2SingleInt, @ctz(SingleInt, d[high]));
return n[high] >> @intCast(Log2SingleInt, @ctz(d[high]));
}
// K K
// ---
// K 0
sr = @bitCast(c_uint, @as(c_int, @clz(SingleInt, d[high])) - @as(c_int, @clz(SingleInt, n[high])));
sr = @bitCast(c_uint, @as(c_int, @clz(d[high])) - @as(c_int, @clz(n[high])));
// 0 <= sr <= single_int_bits - 2 or sr large
if (sr > single_int_bits - 2) {
if (maybe_rem) |rem| {
@ -110,7 +110,7 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
if (d[low] == 1) {
return a;
}
sr = @ctz(SingleInt, d[low]);
sr = @ctz(d[low]);
q[high] = n[high] >> @intCast(Log2SingleInt, sr);
q[low] = (n[high] << @intCast(Log2SingleInt, single_int_bits - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
return @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &q[0]).*; // TODO issue #421
@ -118,7 +118,7 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
// K X
// ---
// 0 K
sr = 1 + single_int_bits + @as(c_uint, @clz(SingleInt, d[low])) - @as(c_uint, @clz(SingleInt, n[high]));
sr = 1 + single_int_bits + @as(c_uint, @clz(d[low])) - @as(c_uint, @clz(n[high]));
// 2 <= sr <= double_int_bits - 1
// q.all = a << (double_int_bits - sr);
// r.all = a >> sr;
@ -144,7 +144,7 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
// K X
// ---
// K K
sr = @bitCast(c_uint, @as(c_int, @clz(SingleInt, d[high])) - @as(c_int, @clz(SingleInt, n[high])));
sr = @bitCast(c_uint, @as(c_int, @clz(d[high])) - @as(c_int, @clz(n[high])));
// 0 <= sr <= single_int_bits - 1 or sr large
if (sr > single_int_bits - 1) {
if (maybe_rem) |rem| {

View File

@ -703,7 +703,7 @@ const PosixImpl = struct {
const max_multiplier_bits = @bitSizeOf(usize);
const fibonacci_multiplier = 0x9E3779B97F4A7C15 >> (64 - max_multiplier_bits);
const max_bucket_bits = @ctz(usize, buckets.len);
const max_bucket_bits = @ctz(buckets.len);
comptime assert(std.math.isPowerOfTwo(buckets.len));
const index = (address *% fibonacci_multiplier) >> (max_multiplier_bits - max_bucket_bits);
@ -721,7 +721,7 @@ const PosixImpl = struct {
// then cut off the zero bits from the alignment to get the unique address.
const addr = @ptrToInt(ptr);
assert(addr & (alignment - 1) == 0);
return addr >> @ctz(usize, alignment);
return addr >> @ctz(alignment);
}
};

View File

@ -140,7 +140,7 @@ const FutexImpl = struct {
// - they both seem to mark the cache-line as modified regardless: https://stackoverflow.com/a/63350048
// - `lock bts` is smaller instruction-wise which makes it better for inlining
if (comptime builtin.target.cpu.arch.isX86()) {
const locked_bit = @ctz(u32, @as(u32, locked));
const locked_bit = @ctz(@as(u32, locked));
return self.state.bitSet(locked_bit, .Acquire) == 0;
}

View File

@ -168,8 +168,8 @@ pub const DefaultRwLock = struct {
const IS_WRITING: usize = 1;
const WRITER: usize = 1 << 1;
const READER: usize = 1 << (1 + @bitSizeOf(Count));
const WRITER_MASK: usize = std.math.maxInt(Count) << @ctz(usize, WRITER);
const READER_MASK: usize = std.math.maxInt(Count) << @ctz(usize, READER);
const WRITER_MASK: usize = std.math.maxInt(Count) << @ctz(WRITER);
const READER_MASK: usize = std.math.maxInt(Count) << @ctz(READER);
const Count = std.meta.Int(.unsigned, @divFloor(@bitSizeOf(usize) - 1, 2));
pub fn tryLock(rwl: *DefaultRwLock) bool {

View File

@ -91,7 +91,7 @@ pub fn IntegerBitSet(comptime size: u16) type {
/// Returns the total number of set bits in this bit set.
pub fn count(self: Self) usize {
return @popCount(MaskInt, self.mask);
return @popCount(self.mask);
}
/// Changes the value of the specified bit of the bit
@ -179,7 +179,7 @@ pub fn IntegerBitSet(comptime size: u16) type {
pub fn findFirstSet(self: Self) ?usize {
const mask = self.mask;
if (mask == 0) return null;
return @ctz(MaskInt, mask);
return @ctz(mask);
}
/// Finds the index of the first set bit, and unsets it.
@ -187,7 +187,7 @@ pub fn IntegerBitSet(comptime size: u16) type {
pub fn toggleFirstSet(self: *Self) ?usize {
const mask = self.mask;
if (mask == 0) return null;
const index = @ctz(MaskInt, mask);
const index = @ctz(mask);
self.mask = mask & (mask - 1);
return index;
}
@ -222,12 +222,12 @@ pub fn IntegerBitSet(comptime size: u16) type {
switch (direction) {
.forward => {
const next_index = @ctz(MaskInt, self.bits_remain);
const next_index = @ctz(self.bits_remain);
self.bits_remain &= self.bits_remain - 1;
return next_index;
},
.reverse => {
const leading_zeroes = @clz(MaskInt, self.bits_remain);
const leading_zeroes = @clz(self.bits_remain);
const top_bit = (@bitSizeOf(MaskInt) - 1) - leading_zeroes;
self.bits_remain &= (@as(MaskInt, 1) << @intCast(ShiftInt, top_bit)) - 1;
return top_bit;
@ -347,7 +347,7 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type {
pub fn count(self: Self) usize {
var total: usize = 0;
for (self.masks) |mask| {
total += @popCount(MaskInt, mask);
total += @popCount(mask);
}
return total;
}
@ -475,7 +475,7 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type {
if (mask != 0) break mask;
offset += @bitSizeOf(MaskInt);
} else return null;
return offset + @ctz(MaskInt, mask);
return offset + @ctz(mask);
}
/// Finds the index of the first set bit, and unsets it.
@ -486,7 +486,7 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type {
if (mask.* != 0) break mask;
offset += @bitSizeOf(MaskInt);
} else return null;
const index = @ctz(MaskInt, mask.*);
const index = @ctz(mask.*);
mask.* &= (mask.* - 1);
return offset + index;
}
@ -657,7 +657,7 @@ pub const DynamicBitSetUnmanaged = struct {
var total: usize = 0;
for (self.masks[0..num_masks]) |mask| {
// Note: This is where we depend on padding bits being zero
total += @popCount(MaskInt, mask);
total += @popCount(mask);
}
return total;
}
@ -795,7 +795,7 @@ pub const DynamicBitSetUnmanaged = struct {
mask += 1;
offset += @bitSizeOf(MaskInt);
} else return null;
return offset + @ctz(MaskInt, mask[0]);
return offset + @ctz(mask[0]);
}
/// Finds the index of the first set bit, and unsets it.
@ -808,7 +808,7 @@ pub const DynamicBitSetUnmanaged = struct {
mask += 1;
offset += @bitSizeOf(MaskInt);
} else return null;
const index = @ctz(MaskInt, mask[0]);
const index = @ctz(mask[0]);
mask[0] &= (mask[0] - 1);
return offset + index;
}
@ -1067,12 +1067,12 @@ fn BitSetIterator(comptime MaskInt: type, comptime options: IteratorOptions) typ
switch (direction) {
.forward => {
const next_index = @ctz(MaskInt, self.bits_remain) + self.bit_offset;
const next_index = @ctz(self.bits_remain) + self.bit_offset;
self.bits_remain &= self.bits_remain - 1;
return next_index;
},
.reverse => {
const leading_zeroes = @clz(MaskInt, self.bits_remain);
const leading_zeroes = @clz(self.bits_remain);
const top_bit = (@bitSizeOf(MaskInt) - 1) - leading_zeroes;
const no_top_bit_mask = (@as(MaskInt, 1) << @intCast(ShiftInt, top_bit)) - 1;
self.bits_remain &= no_top_bit_mask;

View File

@ -2,7 +2,7 @@ const math = @import("std").math;
// Reverse bit-by-bit a N-bit code.
pub fn bitReverse(comptime T: type, value: T, N: usize) T {
const r = @bitReverse(T, value);
const r = @bitReverse(value);
return r >> @intCast(math.Log2Int(T), @typeInfo(T).Int.bits - N);
}

View File

@ -66,7 +66,7 @@ fn AesOcb(comptime Aes: anytype) type {
var offset = [_]u8{0} ** 16;
var i: usize = 0;
while (i < full_blocks) : (i += 1) {
xorWith(&offset, lt[@ctz(usize, i + 1)]);
xorWith(&offset, lt[@ctz(i + 1)]);
var e = xorBlocks(offset, a[i * 16 ..][0..16].*);
aes_enc_ctx.encrypt(&e, &e);
xorWith(&sum, e);
@ -129,7 +129,7 @@ fn AesOcb(comptime Aes: anytype) type {
var es: [16 * wb]u8 align(16) = undefined;
var j: usize = 0;
while (j < wb) : (j += 1) {
xorWith(&offset, lt[@ctz(usize, i + 1 + j)]);
xorWith(&offset, lt[@ctz(i + 1 + j)]);
offsets[j] = offset;
const p = m[(i + j) * 16 ..][0..16].*;
mem.copy(u8, es[j * 16 ..][0..16], &xorBlocks(p, offsets[j]));
@ -143,7 +143,7 @@ fn AesOcb(comptime Aes: anytype) type {
}
}
while (i < full_blocks) : (i += 1) {
xorWith(&offset, lt[@ctz(usize, i + 1)]);
xorWith(&offset, lt[@ctz(i + 1)]);
const p = m[i * 16 ..][0..16].*;
var e = xorBlocks(p, offset);
aes_enc_ctx.encrypt(&e, &e);
@ -193,7 +193,7 @@ fn AesOcb(comptime Aes: anytype) type {
var es: [16 * wb]u8 align(16) = undefined;
var j: usize = 0;
while (j < wb) : (j += 1) {
xorWith(&offset, lt[@ctz(usize, i + 1 + j)]);
xorWith(&offset, lt[@ctz(i + 1 + j)]);
offsets[j] = offset;
const q = c[(i + j) * 16 ..][0..16].*;
mem.copy(u8, es[j * 16 ..][0..16], &xorBlocks(q, offsets[j]));
@ -207,7 +207,7 @@ fn AesOcb(comptime Aes: anytype) type {
}
}
while (i < full_blocks) : (i += 1) {
xorWith(&offset, lt[@ctz(usize, i + 1)]);
xorWith(&offset, lt[@ctz(i + 1)]);
const q = c[i * 16 ..][0..16].*;
var e = xorBlocks(q, offset);
aes_dec_ctx.decrypt(&e, &e);

View File

@ -41,8 +41,8 @@ pub const Ghash = struct {
pub fn init(key: *const [key_length]u8) Ghash {
const h1 = mem.readIntBig(u64, key[0..8]);
const h0 = mem.readIntBig(u64, key[8..16]);
const h1r = @bitReverse(u64, h1);
const h0r = @bitReverse(u64, h0);
const h1r = @bitReverse(h1);
const h0r = @bitReverse(h0);
const h2 = h0 ^ h1;
const h2r = h0r ^ h1r;
@ -68,8 +68,8 @@ pub const Ghash = struct {
hh.update(key);
const hh1 = hh.y1;
const hh0 = hh.y0;
const hh1r = @bitReverse(u64, hh1);
const hh0r = @bitReverse(u64, hh0);
const hh1r = @bitReverse(hh1);
const hh0r = @bitReverse(hh0);
const hh2 = hh0 ^ hh1;
const hh2r = hh0r ^ hh1r;
@ -156,8 +156,8 @@ pub const Ghash = struct {
y1 ^= mem.readIntBig(u64, msg[i..][0..8]);
y0 ^= mem.readIntBig(u64, msg[i..][8..16]);
const y1r = @bitReverse(u64, y1);
const y0r = @bitReverse(u64, y0);
const y1r = @bitReverse(y1);
const y0r = @bitReverse(y0);
const y2 = y0 ^ y1;
const y2r = y0r ^ y1r;
@ -172,8 +172,8 @@ pub const Ghash = struct {
const sy1 = mem.readIntBig(u64, msg[i..][16..24]);
const sy0 = mem.readIntBig(u64, msg[i..][24..32]);
const sy1r = @bitReverse(u64, sy1);
const sy0r = @bitReverse(u64, sy0);
const sy1r = @bitReverse(sy1);
const sy0r = @bitReverse(sy0);
const sy2 = sy0 ^ sy1;
const sy2r = sy0r ^ sy1r;
@ -191,9 +191,9 @@ pub const Ghash = struct {
z0h ^= sz0h;
z1h ^= sz1h;
z2h ^= sz2h;
z0h = @bitReverse(u64, z0h) >> 1;
z1h = @bitReverse(u64, z1h) >> 1;
z2h = @bitReverse(u64, z2h) >> 1;
z0h = @bitReverse(z0h) >> 1;
z1h = @bitReverse(z1h) >> 1;
z2h = @bitReverse(z2h) >> 1;
var v3 = z1h;
var v2 = z1 ^ z2h;
@ -217,8 +217,8 @@ pub const Ghash = struct {
y1 ^= mem.readIntBig(u64, msg[i..][0..8]);
y0 ^= mem.readIntBig(u64, msg[i..][8..16]);
const y1r = @bitReverse(u64, y1);
const y0r = @bitReverse(u64, y0);
const y1r = @bitReverse(y1);
const y0r = @bitReverse(y0);
const y2 = y0 ^ y1;
const y2r = y0r ^ y1r;
@ -228,9 +228,9 @@ pub const Ghash = struct {
var z0h = clmul(y0r, st.h0r);
var z1h = clmul(y1r, st.h1r);
var z2h = clmul(y2r, st.h2r) ^ z0h ^ z1h;
z0h = @bitReverse(u64, z0h) >> 1;
z1h = @bitReverse(u64, z1h) >> 1;
z2h = @bitReverse(u64, z2h) >> 1;
z0h = @bitReverse(z0h) >> 1;
z1h = @bitReverse(z1h) >> 1;
z2h = @bitReverse(z2h) >> 1;
// shift & reduce
var v3 = z1h;

View File

@ -387,7 +387,7 @@ pub const Header = struct {
const machine = if (need_bswap) blk: {
const value = @enumToInt(hdr32.e_machine);
break :blk @intToEnum(EM, @byteSwap(@TypeOf(value), value));
break :blk @intToEnum(EM, @byteSwap(value));
} else hdr32.e_machine;
return @as(Header, .{
@ -511,7 +511,7 @@ pub fn SectionHeaderIterator(ParseSource: anytype) type {
pub fn int(is_64: bool, need_bswap: bool, int_32: anytype, int_64: anytype) @TypeOf(int_64) {
if (is_64) {
if (need_bswap) {
return @byteSwap(@TypeOf(int_64), int_64);
return @byteSwap(int_64);
} else {
return int_64;
}
@ -522,7 +522,7 @@ pub fn int(is_64: bool, need_bswap: bool, int_32: anytype, int_64: anytype) @Typ
pub fn int32(need_bswap: bool, int_32: anytype, comptime Int64: anytype) Int64 {
if (need_bswap) {
return @byteSwap(@TypeOf(int_32), int_32);
return @byteSwap(int_32);
} else {
return int_32;
}

View File

@ -56,7 +56,7 @@ pub fn Channel(comptime T: type) type {
pub fn init(self: *SelfChannel, buffer: []T) void {
// The ring buffer implementation only works with power of 2 buffer sizes
// because of relying on subtracting across zero. For example (0 -% 1) % 10 == 5
assert(buffer.len == 0 or @popCount(usize, buffer.len) == 1);
assert(buffer.len == 0 or @popCount(buffer.len) == 1);
self.* = SelfChannel{
.buffer_len = 0,

View File

@ -195,7 +195,7 @@ pub fn format(
}
if (comptime arg_state.hasUnusedArgs()) {
const missing_count = arg_state.args_len - @popCount(ArgSetType, arg_state.used_args);
const missing_count = arg_state.args_len - @popCount(arg_state.used_args);
switch (missing_count) {
0 => unreachable,
1 => @compileError("unused argument in '" ++ fmt ++ "'"),
@ -380,7 +380,7 @@ const ArgState = struct {
args_len: usize,
fn hasUnusedArgs(self: *@This()) bool {
return @popCount(ArgSetType, self.used_args) != self.args_len;
return @popCount(self.used_args) != self.args_len;
}
fn nextArg(self: *@This(), arg_index: ?usize) ?usize {

View File

@ -36,7 +36,7 @@ pub fn convertEiselLemire(comptime T: type, q: i64, w_: u64) ?BiasedFp(f64) {
}
// Normalize our significant digits, so the most-significant bit is set.
const lz = @clz(u64, @bitCast(u64, w));
const lz = @clz(@bitCast(u64, w));
w = math.shl(u64, w, lz);
const r = computeProductApprox(q, w, float_info.mantissa_explicit_bits + 3);

View File

@ -143,9 +143,9 @@ pub const CityHash32 = struct {
h = rotr32(h, 19);
h = h *% 5 +% 0xe6546b64;
g ^= b4;
g = @byteSwap(u32, g) *% 5;
g = @byteSwap(g) *% 5;
h +%= b4 *% 5;
h = @byteSwap(u32, h);
h = @byteSwap(h);
f +%= b0;
const t: u32 = h;
h = f;
@ -252,11 +252,11 @@ pub const CityHash64 = struct {
const u: u64 = rotr64(a +% g, 43) +% (rotr64(b, 30) +% c) *% 9;
const v: u64 = ((a +% g) ^ d) +% f +% 1;
const w: u64 = @byteSwap(u64, (u +% v) *% mul) +% h;
const w: u64 = @byteSwap((u +% v) *% mul) +% h;
const x: u64 = rotr64(e +% f, 42) +% c;
const y: u64 = (@byteSwap(u64, (v +% w) *% mul) +% g) *% mul;
const y: u64 = (@byteSwap((v +% w) *% mul) +% g) *% mul;
const z: u64 = e +% f +% c;
const a1: u64 = @byteSwap(u64, (x +% z) *% mul +% y) +% b;
const a1: u64 = @byteSwap((x +% z) *% mul +% y) +% b;
const b1: u64 = shiftmix((z +% a1) *% mul +% d +% h) *% mul;
return b1 +% x;
}

View File

@ -19,7 +19,7 @@ pub const Murmur2_32 = struct {
for (@ptrCast([*]align(1) const u32, str.ptr)[0..(len >> 2)]) |v| {
var k1: u32 = v;
if (native_endian == .Big)
k1 = @byteSwap(u32, k1);
k1 = @byteSwap(k1);
k1 *%= m;
k1 ^= k1 >> 24;
k1 *%= m;
@ -104,7 +104,7 @@ pub const Murmur2_64 = struct {
for (@ptrCast([*]align(1) const u64, str.ptr)[0..@intCast(usize, len >> 3)]) |v| {
var k1: u64 = v;
if (native_endian == .Big)
k1 = @byteSwap(u64, k1);
k1 = @byteSwap(k1);
k1 *%= m;
k1 ^= k1 >> 47;
k1 *%= m;
@ -117,7 +117,7 @@ pub const Murmur2_64 = struct {
var k1: u64 = 0;
@memcpy(@ptrCast([*]u8, &k1), @ptrCast([*]const u8, &str[@intCast(usize, offset)]), @intCast(usize, rest));
if (native_endian == .Big)
k1 = @byteSwap(u64, k1);
k1 = @byteSwap(k1);
h1 ^= k1;
h1 *%= m;
}
@ -184,7 +184,7 @@ pub const Murmur3_32 = struct {
for (@ptrCast([*]align(1) const u32, str.ptr)[0..(len >> 2)]) |v| {
var k1: u32 = v;
if (native_endian == .Big)
k1 = @byteSwap(u32, k1);
k1 = @byteSwap(k1);
k1 *%= c1;
k1 = rotl32(k1, 15);
k1 *%= c2;
@ -296,7 +296,7 @@ fn SMHasherTest(comptime hash_fn: anytype, comptime hashbits: u32) u32 {
var h = hash_fn(key[0..i], 256 - i);
if (native_endian == .Big)
h = @byteSwap(@TypeOf(h), h);
h = @byteSwap(h);
@memcpy(@ptrCast([*]u8, &hashes[i * hashbytes]), @ptrCast([*]u8, &h), hashbytes);
}
@ -310,8 +310,8 @@ test "murmur2_32" {
var v0le: u32 = v0;
var v1le: u64 = v1;
if (native_endian == .Big) {
v0le = @byteSwap(u32, v0le);
v1le = @byteSwap(u64, v1le);
v0le = @byteSwap(v0le);
v1le = @byteSwap(v1le);
}
try testing.expectEqual(Murmur2_32.hash(@ptrCast([*]u8, &v0le)[0..4]), Murmur2_32.hashUint32(v0));
try testing.expectEqual(Murmur2_32.hash(@ptrCast([*]u8, &v1le)[0..8]), Murmur2_32.hashUint64(v1));
@ -324,8 +324,8 @@ test "murmur2_64" {
var v0le: u32 = v0;
var v1le: u64 = v1;
if (native_endian == .Big) {
v0le = @byteSwap(u32, v0le);
v1le = @byteSwap(u64, v1le);
v0le = @byteSwap(v0le);
v1le = @byteSwap(v1le);
}
try testing.expectEqual(Murmur2_64.hash(@ptrCast([*]u8, &v0le)[0..4]), Murmur2_64.hashUint32(v0));
try testing.expectEqual(Murmur2_64.hash(@ptrCast([*]u8, &v1le)[0..8]), Murmur2_64.hashUint64(v1));
@ -338,8 +338,8 @@ test "murmur3_32" {
var v0le: u32 = v0;
var v1le: u64 = v1;
if (native_endian == .Big) {
v0le = @byteSwap(u32, v0le);
v1le = @byteSwap(u64, v1le);
v0le = @byteSwap(v0le);
v1le = @byteSwap(v1le);
}
try testing.expectEqual(Murmur3_32.hash(@ptrCast([*]u8, &v0le)[0..4]), Murmur3_32.hashUint32(v0));
try testing.expectEqual(Murmur3_32.hash(@ptrCast([*]u8, &v1le)[0..8]), Murmur3_32.hashUint64(v1));

View File

@ -479,7 +479,7 @@ const WasmPageAllocator = struct {
@setCold(true);
for (self.data) |segment, i| {
const spills_into_next = @bitCast(i128, segment) < 0;
const has_enough_bits = @popCount(u128, segment) >= num_pages;
const has_enough_bits = @popCount(segment) >= num_pages;
if (!spills_into_next and !has_enough_bits) continue;
@ -1185,7 +1185,7 @@ pub fn testAllocatorLargeAlignment(base_allocator: mem.Allocator) !void {
const large_align = @as(u29, mem.page_size << 2);
var align_mask: usize = undefined;
_ = @shlWithOverflow(usize, ~@as(usize, 0), @as(USizeShift, @ctz(u29, large_align)), &align_mask);
_ = @shlWithOverflow(usize, ~@as(usize, 0), @as(USizeShift, @ctz(large_align)), &align_mask);
var slice = try allocator.alignedAlloc(u8, large_align, 500);
try testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));

View File

@ -317,7 +317,7 @@ fn test_write_leb128(value: anytype) !void {
const bytes_needed = bn: {
if (@typeInfo(T).Int.bits <= 7) break :bn @as(u16, 1);
const unused_bits = if (value < 0) @clz(T, ~value) else @clz(T, value);
const unused_bits = if (value < 0) @clz(~value) else @clz(value);
const used_bits: u16 = (@typeInfo(T).Int.bits - unused_bits) + @boolToInt(t_signed);
if (used_bits <= 7) break :bn @as(u16, 1);
break :bn ((used_bits + 6) / 7);

View File

@ -1146,7 +1146,7 @@ pub fn ceilPowerOfTwoPromote(comptime T: type, value: T) std.meta.Int(@typeInfo(
assert(value != 0);
const PromotedType = std.meta.Int(@typeInfo(T).Int.signedness, @typeInfo(T).Int.bits + 1);
const ShiftType = std.math.Log2Int(PromotedType);
return @as(PromotedType, 1) << @intCast(ShiftType, @typeInfo(T).Int.bits - @clz(T, value - 1));
return @as(PromotedType, 1) << @intCast(ShiftType, @typeInfo(T).Int.bits - @clz(value - 1));
}
/// Returns the next power of two (if the value is not already a power of two).
@ -1212,7 +1212,7 @@ pub fn log2_int(comptime T: type, x: T) Log2Int(T) {
if (@typeInfo(T) != .Int or @typeInfo(T).Int.signedness != .unsigned)
@compileError("log2_int requires an unsigned integer, found " ++ @typeName(T));
assert(x != 0);
return @intCast(Log2Int(T), @typeInfo(T).Int.bits - 1 - @clz(T, x));
return @intCast(Log2Int(T), @typeInfo(T).Int.bits - 1 - @clz(x));
}
/// Return the log base 2 of integer value x, rounding up to the

View File

@ -887,7 +887,7 @@ pub const Mutable = struct {
var sum: Limb = 0;
for (r.limbs[0..r.len]) |limb| {
sum += @popCount(Limb, limb);
sum += @popCount(limb);
}
r.set(sum);
}
@ -1520,7 +1520,7 @@ pub const Mutable = struct {
) void {
// 0.
// Normalize so that y[t] > b/2
const lz = @clz(Limb, y.limbs[y.len - 1]);
const lz = @clz(y.limbs[y.len - 1]);
const norm_shift = if (lz == 0 and y.toConst().isOdd())
limb_bits // Force an extra limb so that y is even.
else
@ -1917,7 +1917,7 @@ pub const Const = struct {
/// Returns the number of bits required to represent the absolute value of an integer.
pub fn bitCountAbs(self: Const) usize {
return (self.limbs.len - 1) * limb_bits + (limb_bits - @clz(Limb, self.limbs[self.limbs.len - 1]));
return (self.limbs.len - 1) * limb_bits + (limb_bits - @clz(self.limbs[self.limbs.len - 1]));
}
/// Returns the number of bits required to represent the integer in twos-complement form.
@ -1936,9 +1936,9 @@ pub const Const = struct {
if (!self.positive) block: {
bits += 1;
if (@popCount(Limb, self.limbs[self.limbs.len - 1]) == 1) {
if (@popCount(self.limbs[self.limbs.len - 1]) == 1) {
for (self.limbs[0 .. self.limbs.len - 1]) |limb| {
if (@popCount(Limb, limb) != 0) {
if (@popCount(limb) != 0) {
break :block;
}
}
@ -3895,8 +3895,8 @@ fn llpow(r: []Limb, a: []const Limb, b: u32, tmp_limbs: []Limb) void {
// The initial assignment makes the result end in `r` so an extra memory
// copy is saved, each 1 flips the index twice so it's only the zeros that
// matter.
const b_leading_zeros = @clz(u32, b);
const exp_zeros = @popCount(u32, ~b) - b_leading_zeros;
const b_leading_zeros = @clz(b);
const exp_zeros = @popCount(~b) - b_leading_zeros;
if (exp_zeros & 1 != 0) {
tmp1 = tmp_limbs;
tmp2 = r;

View File

@ -1319,7 +1319,7 @@ pub fn readIntNative(comptime T: type, bytes: *const [@divExact(@typeInfo(T).Int
/// This function cannot fail and cannot cause undefined behavior.
/// Assumes the endianness of memory is foreign, so it must byte-swap.
pub fn readIntForeign(comptime T: type, bytes: *const [@divExact(@typeInfo(T).Int.bits, 8)]u8) T {
return @byteSwap(T, readIntNative(T, bytes));
return @byteSwap(readIntNative(T, bytes));
}
pub const readIntLittle = switch (native_endian) {
@ -1348,7 +1348,7 @@ pub fn readIntSliceNative(comptime T: type, bytes: []const u8) T {
/// The bit count of T must be evenly divisible by 8.
/// Assumes the endianness of memory is foreign, so it must byte-swap.
pub fn readIntSliceForeign(comptime T: type, bytes: []const u8) T {
return @byteSwap(T, readIntSliceNative(T, bytes));
return @byteSwap(readIntSliceNative(T, bytes));
}
pub const readIntSliceLittle = switch (native_endian) {
@ -1430,7 +1430,7 @@ pub fn writeIntNative(comptime T: type, buf: *[(@typeInfo(T).Int.bits + 7) / 8]u
/// the integer bit width must be divisible by 8.
/// This function stores in foreign endian, which means it does a @byteSwap first.
pub fn writeIntForeign(comptime T: type, buf: *[@divExact(@typeInfo(T).Int.bits, 8)]u8, value: T) void {
writeIntNative(T, buf, @byteSwap(T, value));
writeIntNative(T, buf, @byteSwap(value));
}
pub const writeIntLittle = switch (native_endian) {
@ -1575,7 +1575,7 @@ pub const bswapAllFields = @compileError("bswapAllFields has been renamed to byt
pub fn byteSwapAllFields(comptime S: type, ptr: *S) void {
if (@typeInfo(S) != .Struct) @compileError("byteSwapAllFields expects a struct as the first argument");
inline for (std.meta.fields(S)) |f| {
@field(ptr, f.name) = @byteSwap(f.field_type, @field(ptr, f.name));
@field(ptr, f.name) = @byteSwap(@field(ptr, f.name));
}
}
@ -2752,14 +2752,14 @@ test "replaceOwned" {
pub fn littleToNative(comptime T: type, x: T) T {
return switch (native_endian) {
.Little => x,
.Big => @byteSwap(T, x),
.Big => @byteSwap(x),
};
}
/// Converts a big-endian integer to host endianness.
pub fn bigToNative(comptime T: type, x: T) T {
return switch (native_endian) {
.Little => @byteSwap(T, x),
.Little => @byteSwap(x),
.Big => x,
};
}
@ -2784,14 +2784,14 @@ pub fn nativeTo(comptime T: type, x: T, desired_endianness: Endian) T {
pub fn nativeToLittle(comptime T: type, x: T) T {
return switch (native_endian) {
.Little => x,
.Big => @byteSwap(T, x),
.Big => @byteSwap(x),
};
}
/// Converts an integer which has host endianness to big endian.
pub fn nativeToBig(comptime T: type, x: T) T {
return switch (native_endian) {
.Little => @byteSwap(T, x),
.Little => @byteSwap(x),
.Big => x,
};
}
@ -2803,7 +2803,7 @@ pub fn nativeToBig(comptime T: type, x: T) T {
/// - The delta required to align the pointer is not a multiple of the pointee's
/// type.
pub fn alignPointerOffset(ptr: anytype, align_to: u29) ?usize {
assert(align_to != 0 and @popCount(u29, align_to) == 1);
assert(align_to != 0 and @popCount(align_to) == 1);
const T = @TypeOf(ptr);
const info = @typeInfo(T);
@ -3293,7 +3293,7 @@ test "alignForward" {
/// Round an address up to the previous aligned address
/// Unlike `alignBackward`, `alignment` can be any positive number, not just a power of 2.
pub fn alignBackwardAnyAlign(i: usize, alignment: usize) usize {
if (@popCount(usize, alignment) == 1)
if (@popCount(alignment) == 1)
return alignBackward(i, alignment);
assert(alignment != 0);
return i - @mod(i, alignment);
@ -3308,7 +3308,7 @@ pub fn alignBackward(addr: usize, alignment: usize) usize {
/// Round an address up to the previous aligned address
/// The alignment must be a power of 2 and greater than 0.
pub fn alignBackwardGeneric(comptime T: type, addr: T, alignment: T) T {
assert(@popCount(T, alignment) == 1);
assert(@popCount(alignment) == 1);
// 000010000 // example alignment
// 000001111 // subtract 1
// 111110000 // binary not
@ -3318,11 +3318,11 @@ pub fn alignBackwardGeneric(comptime T: type, addr: T, alignment: T) T {
/// Returns whether `alignment` is a valid alignment, meaning it is
/// a positive power of 2.
pub fn isValidAlign(alignment: u29) bool {
return @popCount(u29, alignment) == 1;
return @popCount(alignment) == 1;
}
pub fn isAlignedAnyAlign(i: usize, alignment: usize) bool {
if (@popCount(usize, alignment) == 1)
if (@popCount(alignment) == 1)
return isAligned(i, alignment);
assert(alignment != 0);
return 0 == @mod(i, alignment);

View File

@ -3377,7 +3377,7 @@ pub const cpu_count_t = std.meta.Int(.unsigned, std.math.log2(CPU_SETSIZE * 8));
pub fn CPU_COUNT(set: cpu_set_t) cpu_count_t {
var sum: cpu_count_t = 0;
for (set) |x| {
sum += @popCount(usize, x);
sum += @popCount(x);
}
return sum;
}

View File

@ -55,9 +55,9 @@ pub const Guid = extern struct {
if (f.len == 0) {
const fmt = std.fmt.fmtSliceHexLower;
const time_low = @byteSwap(u32, self.time_low);
const time_mid = @byteSwap(u16, self.time_mid);
const time_high_and_version = @byteSwap(u16, self.time_high_and_version);
const time_low = @byteSwap(self.time_low);
const time_mid = @byteSwap(self.time_mid);
const time_high_and_version = @byteSwap(self.time_high_and_version);
return std.fmt.format(writer, "{:0>8}-{:0>4}-{:0>4}-{:0>2}{:0>2}-{:0>12}", .{
fmt(std.mem.asBytes(&time_low)),

View File

@ -76,7 +76,7 @@ pub fn PackedIntIo(comptime Int: type, comptime endian: Endian) type {
const value_ptr = @ptrCast(*align(1) const Container, &bytes[start_byte]);
var value = value_ptr.*;
if (endian != native_endian) value = @byteSwap(Container, value);
if (endian != native_endian) value = @byteSwap(value);
switch (endian) {
.Big => {
@ -126,7 +126,7 @@ pub fn PackedIntIo(comptime Int: type, comptime endian: Endian) type {
const target_ptr = @ptrCast(*align(1) Container, &bytes[start_byte]);
var target = target_ptr.*;
if (endian != native_endian) target = @byteSwap(Container, target);
if (endian != native_endian) target = @byteSwap(target);
//zero the bits we want to replace in the existing bytes
const inv_mask = @intCast(Container, std.math.maxInt(UnInt)) << keep_shift;
@ -136,7 +136,7 @@ pub fn PackedIntIo(comptime Int: type, comptime endian: Endian) type {
//merge the new value
target |= value;
if (endian != native_endian) target = @byteSwap(Container, target);
if (endian != native_endian) target = @byteSwap(target);
//save it back
target_ptr.* = target;

View File

@ -69,7 +69,7 @@ pub fn PriorityDequeue(comptime T: type, comptime Context: type, comptime compar
// The first element is on a min layer;
// next two are on a max layer;
// next four are on a min layer, and so on.
const leading_zeros = @clz(usize, index + 1);
const leading_zeros = @clz(index + 1);
const highest_set_bit = @bitSizeOf(usize) - 1 - leading_zeros;
return (highest_set_bit & 1) == 0;
}

View File

@ -257,15 +257,15 @@ pub const Random = struct {
// If all 41 bits are zero, generate additional random bits, until a
// set bit is found, or 126 bits have been generated.
const rand = r.int(u64);
var rand_lz = @clz(u64, rand);
var rand_lz = @clz(rand);
if (rand_lz >= 41) {
// TODO: when #5177 or #489 is implemented,
// tell the compiler it is unlikely (1/2^41) to reach this point.
// (Same for the if branch and the f64 calculations below.)
rand_lz = 41 + @clz(u64, r.int(u64));
rand_lz = 41 + @clz(r.int(u64));
if (rand_lz == 41 + 64) {
// It is astronomically unlikely to reach this point.
rand_lz += @clz(u32, r.int(u32) | 0x7FF);
rand_lz += @clz(r.int(u32) | 0x7FF);
}
}
const mantissa = @truncate(u23, rand);
@ -277,12 +277,12 @@ pub const Random = struct {
// If all 12 bits are zero, generate additional random bits, until a
// set bit is found, or 1022 bits have been generated.
const rand = r.int(u64);
var rand_lz: u64 = @clz(u64, rand);
var rand_lz: u64 = @clz(rand);
if (rand_lz >= 12) {
rand_lz = 12;
while (true) {
// It is astronomically unlikely for this loop to execute more than once.
const addl_rand_lz = @clz(u64, r.int(u64));
const addl_rand_lz = @clz(r.int(u64));
rand_lz += addl_rand_lz;
if (addl_rand_lz != 64) {
break;

View File

@ -1,13 +1,13 @@
const std = @import("std");
pub inline fn __builtin_bswap16(val: u16) u16 {
return @byteSwap(u16, val);
return @byteSwap(val);
}
pub inline fn __builtin_bswap32(val: u32) u32 {
return @byteSwap(u32, val);
return @byteSwap(val);
}
pub inline fn __builtin_bswap64(val: u64) u64 {
return @byteSwap(u64, val);
return @byteSwap(val);
}
pub inline fn __builtin_signbit(val: f64) c_int {
@ -20,19 +20,19 @@ pub inline fn __builtin_signbitf(val: f32) c_int {
pub inline fn __builtin_popcount(val: c_uint) c_int {
// popcount of a c_uint will never exceed the capacity of a c_int
@setRuntimeSafety(false);
return @bitCast(c_int, @as(c_uint, @popCount(c_uint, val)));
return @bitCast(c_int, @as(c_uint, @popCount(val)));
}
pub inline fn __builtin_ctz(val: c_uint) c_int {
// Returns the number of trailing 0-bits in val, starting at the least significant bit position.
// In C if `val` is 0, the result is undefined; in zig it's the number of bits in a c_uint
@setRuntimeSafety(false);
return @bitCast(c_int, @as(c_uint, @ctz(c_uint, val)));
return @bitCast(c_int, @as(c_uint, @ctz(val)));
}
pub inline fn __builtin_clz(val: c_uint) c_int {
// Returns the number of leading 0-bits in x, starting at the most significant bit position.
// In C if `val` is 0, the result is undefined; in zig it's the number of bits in a c_uint
@setRuntimeSafety(false);
return @bitCast(c_int, @as(c_uint, @clz(c_uint, val)));
return @bitCast(c_int, @as(c_uint, @clz(val)));
}
pub inline fn __builtin_sqrt(val: f64) f64 {

View File

@ -852,13 +852,13 @@ pub const LdInfo = struct {
pub fn elfInt(is_64: bool, need_bswap: bool, int_32: anytype, int_64: anytype) @TypeOf(int_64) {
if (is_64) {
if (need_bswap) {
return @byteSwap(@TypeOf(int_64), int_64);
return @byteSwap(int_64);
} else {
return int_64;
}
} else {
if (need_bswap) {
return @byteSwap(@TypeOf(int_32), int_32);
return @byteSwap(int_32);
} else {
return int_32;
}

View File

@ -7733,11 +7733,11 @@ fn builtinCall(
.has_decl => return hasDeclOrField(gz, scope, rl, node, params[0], params[1], .has_decl),
.has_field => return hasDeclOrField(gz, scope, rl, node, params[0], params[1], .has_field),
.clz => return bitBuiltin(gz, scope, rl, node, params[0], params[1], .clz),
.ctz => return bitBuiltin(gz, scope, rl, node, params[0], params[1], .ctz),
.pop_count => return bitBuiltin(gz, scope, rl, node, params[0], params[1], .pop_count),
.byte_swap => return bitBuiltin(gz, scope, rl, node, params[0], params[1], .byte_swap),
.bit_reverse => return bitBuiltin(gz, scope, rl, node, params[0], params[1], .bit_reverse),
.clz => return bitBuiltin(gz, scope, rl, node, params[0], .clz),
.ctz => return bitBuiltin(gz, scope, rl, node, params[0], .ctz),
.pop_count => return bitBuiltin(gz, scope, rl, node, params[0], .pop_count),
.byte_swap => return bitBuiltin(gz, scope, rl, node, params[0], .byte_swap),
.bit_reverse => return bitBuiltin(gz, scope, rl, node, params[0], .bit_reverse),
.div_exact => return divBuiltin(gz, scope, rl, node, params[0], params[1], .div_exact),
.div_floor => return divBuiltin(gz, scope, rl, node, params[0], params[1], .div_floor),
@ -8100,17 +8100,9 @@ fn bitBuiltin(
scope: *Scope,
rl: ResultLoc,
node: Ast.Node.Index,
int_type_node: Ast.Node.Index,
operand_node: Ast.Node.Index,
tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
// The accepted proposal https://github.com/ziglang/zig/issues/6835
// tells us to remove the type parameter from these builtins. To stay
// source-compatible with stage1, we still observe the parameter here,
// but we do not encode it into the ZIR. To implement this proposal in
// stage2, only AstGen code will need to be changed.
_ = try typeExpr(gz, scope, int_type_node);
const operand = try expr(gz, scope, .none, operand_node);
const result = try gz.addUnNode(tag, operand, node);
return rvalue(gz, rl, result, node);

View File

@ -250,14 +250,14 @@ pub const list = list: {
"@byteSwap",
.{
.tag = .byte_swap,
.param_count = 2,
.param_count = 1,
},
},
.{
"@bitReverse",
.{
.tag = .bit_reverse,
.param_count = 2,
.param_count = 1,
},
},
.{
@ -301,7 +301,7 @@ pub const list = list: {
"@clz",
.{
.tag = .clz,
.param_count = 2,
.param_count = 1,
},
},
.{
@ -336,7 +336,7 @@ pub const list = list: {
"@ctz",
.{
.tag = .ctz,
.param_count = 2,
.param_count = 1,
},
},
.{
@ -614,7 +614,7 @@ pub const list = list: {
"@popCount",
.{
.tag = .pop_count,
.param_count = 2,
.param_count = 1,
},
},
.{

View File

@ -13032,7 +13032,7 @@ fn analyzePtrArithmetic(
// The resulting pointer is aligned to the lcd between the offset (an
// arbitrary number) and the alignment factor (always a power of two,
// non zero).
const new_align = @as(u32, 1) << @intCast(u5, @ctz(u64, addend | ptr_info.@"align"));
const new_align = @as(u32, 1) << @intCast(u5, @ctz(addend | ptr_info.@"align"));
break :t try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = ptr_info.pointee_type,
@ -17781,7 +17781,7 @@ fn zirBitCount(
) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
_ = try checkIntOrVector(sema, block, operand, operand_src);
@ -17833,17 +17833,16 @@ fn zirBitCount(
fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
const scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src);
const scalar_ty = try sema.checkIntOrVector(block, operand, operand_src);
const target = sema.mod.getTarget();
const bits = scalar_ty.intInfo(target).bits;
if (bits % 8 != 0) {
return sema.fail(
block,
ty_src,
operand_src,
"@byteSwap requires the number of bits to be evenly divisible by 8, but {} has {} bits",
.{ scalar_ty.fmt(sema.mod), bits },
);
@ -17854,7 +17853,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
switch (operand_ty.zigTypeTag()) {
.Int, .ComptimeInt => {
.Int => {
const runtime_src = if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| {
if (val.isUndef()) return sema.addConstUndef(operand_ty);
const result_val = try val.byteSwap(operand_ty, target, sema.arena);
@ -17892,7 +17891,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
_ = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src);
@ -21656,7 +21655,7 @@ fn structFieldPtrByIndex(
const elem_size_bits = ptr_ty_data.pointee_type.bitSize(target);
if (elem_size_bytes * 8 == elem_size_bits) {
const byte_offset = ptr_ty_data.bit_offset / 8;
const new_align = @as(u32, 1) << @intCast(u5, @ctz(u64, byte_offset | parent_align));
const new_align = @as(u32, 1) << @intCast(u5, @ctz(byte_offset | parent_align));
ptr_ty_data.bit_offset = 0;
ptr_ty_data.host_size = 0;
ptr_ty_data.@"align" = new_align;
@ -30426,7 +30425,7 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
// The resulting pointer is aligned to the lcd between the offset (an
// arbitrary number) and the alignment factor (always a power of two,
// non zero).
const new_align = @as(u32, 1) << @intCast(u5, @ctz(u64, addend | ptr_info.@"align"));
const new_align = @as(u32, 1) << @intCast(u5, @ctz(addend | ptr_info.@"align"));
break :a new_align;
};
return try Type.ptr(sema.arena, sema.mod, .{

View File

@ -277,7 +277,7 @@ fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize {
=> return 2 * 4,
.pop_regs, .push_regs => {
const reg_list = emit.mir.instructions.items(.data)[inst].reg_list;
const number_of_regs = @popCount(u32, reg_list);
const number_of_regs = @popCount(reg_list);
const number_of_insts = std.math.divCeil(u6, number_of_regs, 2) catch unreachable;
return number_of_insts * 4;
},
@ -1183,7 +1183,7 @@ fn mirPushPopRegs(emit: *Emit, inst: Mir.Inst.Index) !void {
// sp must be aligned at all times, so we only use stp and ldp
// instructions for minimal instruction count. However, if we do
// not have an even number of registers, we use str and ldr
const number_of_regs = @popCount(u32, reg_list);
const number_of_regs = @popCount(reg_list);
switch (tag) {
.pop_regs => {

View File

@ -343,7 +343,7 @@ fn emitMemArg(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) !void {
try emit.code.append(@enumToInt(tag));
// wasm encodes alignment as power of 2, rather than natural alignment
const encoded_alignment = @ctz(u32, mem_arg.alignment);
const encoded_alignment = @ctz(mem_arg.alignment);
try leb128.writeULEB128(emit.code.writer(), encoded_alignment);
try leb128.writeULEB128(emit.code.writer(), mem_arg.offset);
}

View File

@ -391,7 +391,7 @@ pub fn splitIntoAtomsOneShot(self: *Object, macho_file: *MachO, object_id: u32)
break :blk cc[start..][0..size];
} else null;
const atom_align = if (addr > 0)
math.min(@ctz(u64, addr), sect.@"align")
math.min(@ctz(addr), sect.@"align")
else
sect.@"align";
const atom = try self.createAtomFromSubsection(

View File

@ -3135,12 +3135,12 @@ fn emitSegmentInfo(self: *Wasm, file: fs.File, arena: Allocator) !void {
for (self.segment_info.items) |segment_info| {
log.debug("Emit segment: {s} align({d}) flags({b})", .{
segment_info.name,
@ctz(u32, segment_info.alignment),
@ctz(segment_info.alignment),
segment_info.flags,
});
try leb.writeULEB128(writer, @intCast(u32, segment_info.name.len));
try writer.writeAll(segment_info.name);
try leb.writeULEB128(writer, @ctz(u32, segment_info.alignment));
try leb.writeULEB128(writer, @ctz(segment_info.alignment));
try leb.writeULEB128(writer, segment_info.flags);
}

View File

@ -5374,10 +5374,8 @@ static Stage1ZirInst *astgen_builtin_fn_call(Stage1AstGen *ag, Scope *scope, Ast
if (arg0_value == ag->codegen->invalid_inst_src)
return arg0_value;
AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
Stage1ZirInst *arg1_value = astgen_node(ag, arg1_node, scope);
if (arg1_value == ag->codegen->invalid_inst_src)
return arg1_value;
Stage1ZirInst *arg1_value = arg0_value;
arg0_value = ir_build_typeof_1(ag, scope, arg0_node, arg1_value);
Stage1ZirInst *result;
switch (builtin_fn->id) {

View File

@ -9792,11 +9792,11 @@ static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdCInclude, "cInclude", 1);
create_builtin_fn(g, BuiltinFnIdCDefine, "cDefine", 2);
create_builtin_fn(g, BuiltinFnIdCUndef, "cUndef", 1);
create_builtin_fn(g, BuiltinFnIdCtz, "ctz", 2);
create_builtin_fn(g, BuiltinFnIdClz, "clz", 2);
create_builtin_fn(g, BuiltinFnIdPopCount, "popCount", 2);
create_builtin_fn(g, BuiltinFnIdBswap, "byteSwap", 2);
create_builtin_fn(g, BuiltinFnIdBitReverse, "bitReverse", 2);
create_builtin_fn(g, BuiltinFnIdCtz, "ctz", 1);
create_builtin_fn(g, BuiltinFnIdClz, "clz", 1);
create_builtin_fn(g, BuiltinFnIdPopCount, "popCount", 1);
create_builtin_fn(g, BuiltinFnIdBswap, "byteSwap", 1);
create_builtin_fn(g, BuiltinFnIdBitReverse, "bitReverse", 1);
create_builtin_fn(g, BuiltinFnIdImport, "import", 1);
create_builtin_fn(g, BuiltinFnIdCImport, "cImport", 1);
create_builtin_fn(g, BuiltinFnIdErrName, "errorName", 1);

View File

@ -1582,7 +1582,7 @@ pub const Value = extern union {
.one, .bool_true => return ty_bits - 1,
.int_u64 => {
const big = @clz(u64, val.castTag(.int_u64).?.data);
const big = @clz(val.castTag(.int_u64).?.data);
return big + ty_bits - 64;
},
.int_i64 => {
@ -1599,7 +1599,7 @@ pub const Value = extern union {
while (i != 0) {
i -= 1;
const limb = bigint.limbs[i];
const this_limb_lz = @clz(std.math.big.Limb, limb);
const this_limb_lz = @clz(limb);
total_limb_lz += this_limb_lz;
if (this_limb_lz != bits_per_limb) break;
}
@ -1626,7 +1626,7 @@ pub const Value = extern union {
.one, .bool_true => return 0,
.int_u64 => {
const big = @ctz(u64, val.castTag(.int_u64).?.data);
const big = @ctz(val.castTag(.int_u64).?.data);
return if (big == 64) ty_bits else big;
},
.int_i64 => {
@ -1638,7 +1638,7 @@ pub const Value = extern union {
// Limbs are stored in little-endian order.
var result: u64 = 0;
for (bigint.limbs) |limb| {
const limb_tz = @ctz(std.math.big.Limb, limb);
const limb_tz = @ctz(limb);
result += limb_tz;
if (limb_tz != @sizeOf(std.math.big.Limb) * 8) break;
}
@ -1663,7 +1663,7 @@ pub const Value = extern union {
.zero, .bool_false => return 0,
.one, .bool_true => return 1,
.int_u64 => return @popCount(u64, val.castTag(.int_u64).?.data),
.int_u64 => return @popCount(val.castTag(.int_u64).?.data),
else => {
const info = ty.intInfo(target);

View File

@ -8,7 +8,7 @@ test "@bitReverse large exotic integer" {
// Currently failing on stage1 for big-endian targets
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
try expect(@bitReverse(u95, @as(u95, 0x123456789abcdef111213141)) == 0x4146424447bd9eac8f351624);
try expect(@bitReverse(@as(u95, 0x123456789abcdef111213141)) == 0x4146424447bd9eac8f351624);
}
test "@bitReverse" {
@ -23,74 +23,74 @@ test "@bitReverse" {
fn testBitReverse() !void {
// using comptime_ints, unsigned
try expect(@bitReverse(u0, @as(u0, 0)) == 0);
try expect(@bitReverse(u5, @as(u5, 0x12)) == 0x9);
try expect(@bitReverse(u8, @as(u8, 0x12)) == 0x48);
try expect(@bitReverse(u16, @as(u16, 0x1234)) == 0x2c48);
try expect(@bitReverse(u24, @as(u24, 0x123456)) == 0x6a2c48);
try expect(@bitReverse(u32, @as(u32, 0x12345678)) == 0x1e6a2c48);
try expect(@bitReverse(u40, @as(u40, 0x123456789a)) == 0x591e6a2c48);
try expect(@bitReverse(u48, @as(u48, 0x123456789abc)) == 0x3d591e6a2c48);
try expect(@bitReverse(u56, @as(u56, 0x123456789abcde)) == 0x7b3d591e6a2c48);
try expect(@bitReverse(u64, @as(u64, 0x123456789abcdef1)) == 0x8f7b3d591e6a2c48);
try expect(@bitReverse(u96, @as(u96, 0x123456789abcdef111213141)) == 0x828c84888f7b3d591e6a2c48);
try expect(@bitReverse(u128, @as(u128, 0x123456789abcdef11121314151617181)) == 0x818e868a828c84888f7b3d591e6a2c48);
try expect(@bitReverse(@as(u0, 0)) == 0);
try expect(@bitReverse(@as(u5, 0x12)) == 0x9);
try expect(@bitReverse(@as(u8, 0x12)) == 0x48);
try expect(@bitReverse(@as(u16, 0x1234)) == 0x2c48);
try expect(@bitReverse(@as(u24, 0x123456)) == 0x6a2c48);
try expect(@bitReverse(@as(u32, 0x12345678)) == 0x1e6a2c48);
try expect(@bitReverse(@as(u40, 0x123456789a)) == 0x591e6a2c48);
try expect(@bitReverse(@as(u48, 0x123456789abc)) == 0x3d591e6a2c48);
try expect(@bitReverse(@as(u56, 0x123456789abcde)) == 0x7b3d591e6a2c48);
try expect(@bitReverse(@as(u64, 0x123456789abcdef1)) == 0x8f7b3d591e6a2c48);
try expect(@bitReverse(@as(u96, 0x123456789abcdef111213141)) == 0x828c84888f7b3d591e6a2c48);
try expect(@bitReverse(@as(u128, 0x123456789abcdef11121314151617181)) == 0x818e868a828c84888f7b3d591e6a2c48);
// using runtime uints, unsigned
var num0: u0 = 0;
try expect(@bitReverse(u0, num0) == 0);
try expect(@bitReverse(num0) == 0);
var num5: u5 = 0x12;
try expect(@bitReverse(u5, num5) == 0x9);
try expect(@bitReverse(num5) == 0x9);
var num8: u8 = 0x12;
try expect(@bitReverse(u8, num8) == 0x48);
try expect(@bitReverse(num8) == 0x48);
var num16: u16 = 0x1234;
try expect(@bitReverse(u16, num16) == 0x2c48);
try expect(@bitReverse(num16) == 0x2c48);
var num24: u24 = 0x123456;
try expect(@bitReverse(u24, num24) == 0x6a2c48);
try expect(@bitReverse(num24) == 0x6a2c48);
var num32: u32 = 0x12345678;
try expect(@bitReverse(u32, num32) == 0x1e6a2c48);
try expect(@bitReverse(num32) == 0x1e6a2c48);
var num40: u40 = 0x123456789a;
try expect(@bitReverse(u40, num40) == 0x591e6a2c48);
try expect(@bitReverse(num40) == 0x591e6a2c48);
var num48: u48 = 0x123456789abc;
try expect(@bitReverse(u48, num48) == 0x3d591e6a2c48);
try expect(@bitReverse(num48) == 0x3d591e6a2c48);
var num56: u56 = 0x123456789abcde;
try expect(@bitReverse(u56, num56) == 0x7b3d591e6a2c48);
try expect(@bitReverse(num56) == 0x7b3d591e6a2c48);
var num64: u64 = 0x123456789abcdef1;
try expect(@bitReverse(u64, num64) == 0x8f7b3d591e6a2c48);
try expect(@bitReverse(num64) == 0x8f7b3d591e6a2c48);
var num128: u128 = 0x123456789abcdef11121314151617181;
try expect(@bitReverse(u128, num128) == 0x818e868a828c84888f7b3d591e6a2c48);
try expect(@bitReverse(num128) == 0x818e868a828c84888f7b3d591e6a2c48);
// using comptime_ints, signed, positive
try expect(@bitReverse(u8, @as(u8, 0)) == 0);
try expect(@bitReverse(i8, @bitCast(i8, @as(u8, 0x92))) == @bitCast(i8, @as(u8, 0x49)));
try expect(@bitReverse(i16, @bitCast(i16, @as(u16, 0x1234))) == @bitCast(i16, @as(u16, 0x2c48)));
try expect(@bitReverse(i24, @bitCast(i24, @as(u24, 0x123456))) == @bitCast(i24, @as(u24, 0x6a2c48)));
try expect(@bitReverse(i24, @bitCast(i24, @as(u24, 0x12345f))) == @bitCast(i24, @as(u24, 0xfa2c48)));
try expect(@bitReverse(i24, @bitCast(i24, @as(u24, 0xf23456))) == @bitCast(i24, @as(u24, 0x6a2c4f)));
try expect(@bitReverse(i32, @bitCast(i32, @as(u32, 0x12345678))) == @bitCast(i32, @as(u32, 0x1e6a2c48)));
try expect(@bitReverse(i32, @bitCast(i32, @as(u32, 0xf2345678))) == @bitCast(i32, @as(u32, 0x1e6a2c4f)));
try expect(@bitReverse(i32, @bitCast(i32, @as(u32, 0x1234567f))) == @bitCast(i32, @as(u32, 0xfe6a2c48)));
try expect(@bitReverse(i40, @bitCast(i40, @as(u40, 0x123456789a))) == @bitCast(i40, @as(u40, 0x591e6a2c48)));
try expect(@bitReverse(i48, @bitCast(i48, @as(u48, 0x123456789abc))) == @bitCast(i48, @as(u48, 0x3d591e6a2c48)));
try expect(@bitReverse(i56, @bitCast(i56, @as(u56, 0x123456789abcde))) == @bitCast(i56, @as(u56, 0x7b3d591e6a2c48)));
try expect(@bitReverse(i64, @bitCast(i64, @as(u64, 0x123456789abcdef1))) == @bitCast(i64, @as(u64, 0x8f7b3d591e6a2c48)));
try expect(@bitReverse(i96, @bitCast(i96, @as(u96, 0x123456789abcdef111213141))) == @bitCast(i96, @as(u96, 0x828c84888f7b3d591e6a2c48)));
try expect(@bitReverse(i128, @bitCast(i128, @as(u128, 0x123456789abcdef11121314151617181))) == @bitCast(i128, @as(u128, 0x818e868a828c84888f7b3d591e6a2c48)));
try expect(@bitReverse(@as(u8, 0)) == 0);
try expect(@bitReverse(@bitCast(i8, @as(u8, 0x92))) == @bitCast(i8, @as(u8, 0x49)));
try expect(@bitReverse(@bitCast(i16, @as(u16, 0x1234))) == @bitCast(i16, @as(u16, 0x2c48)));
try expect(@bitReverse(@bitCast(i24, @as(u24, 0x123456))) == @bitCast(i24, @as(u24, 0x6a2c48)));
try expect(@bitReverse(@bitCast(i24, @as(u24, 0x12345f))) == @bitCast(i24, @as(u24, 0xfa2c48)));
try expect(@bitReverse(@bitCast(i24, @as(u24, 0xf23456))) == @bitCast(i24, @as(u24, 0x6a2c4f)));
try expect(@bitReverse(@bitCast(i32, @as(u32, 0x12345678))) == @bitCast(i32, @as(u32, 0x1e6a2c48)));
try expect(@bitReverse(@bitCast(i32, @as(u32, 0xf2345678))) == @bitCast(i32, @as(u32, 0x1e6a2c4f)));
try expect(@bitReverse(@bitCast(i32, @as(u32, 0x1234567f))) == @bitCast(i32, @as(u32, 0xfe6a2c48)));
try expect(@bitReverse(@bitCast(i40, @as(u40, 0x123456789a))) == @bitCast(i40, @as(u40, 0x591e6a2c48)));
try expect(@bitReverse(@bitCast(i48, @as(u48, 0x123456789abc))) == @bitCast(i48, @as(u48, 0x3d591e6a2c48)));
try expect(@bitReverse(@bitCast(i56, @as(u56, 0x123456789abcde))) == @bitCast(i56, @as(u56, 0x7b3d591e6a2c48)));
try expect(@bitReverse(@bitCast(i64, @as(u64, 0x123456789abcdef1))) == @bitCast(i64, @as(u64, 0x8f7b3d591e6a2c48)));
try expect(@bitReverse(@bitCast(i96, @as(u96, 0x123456789abcdef111213141))) == @bitCast(i96, @as(u96, 0x828c84888f7b3d591e6a2c48)));
try expect(@bitReverse(@bitCast(i128, @as(u128, 0x123456789abcdef11121314151617181))) == @bitCast(i128, @as(u128, 0x818e868a828c84888f7b3d591e6a2c48)));
// using signed, negative. Compare to runtime ints returned from llvm.
var neg8: i8 = -18;
try expect(@bitReverse(i8, @as(i8, -18)) == @bitReverse(i8, neg8));
try expect(@bitReverse(@as(i8, -18)) == @bitReverse(neg8));
var neg16: i16 = -32694;
try expect(@bitReverse(i16, @as(i16, -32694)) == @bitReverse(i16, neg16));
try expect(@bitReverse(@as(i16, -32694)) == @bitReverse(neg16));
var neg24: i24 = -6773785;
try expect(@bitReverse(i24, @as(i24, -6773785)) == @bitReverse(i24, neg24));
try expect(@bitReverse(@as(i24, -6773785)) == @bitReverse(neg24));
var neg32: i32 = -16773785;
try expect(@bitReverse(i32, @as(i32, -16773785)) == @bitReverse(i32, neg32));
try expect(@bitReverse(@as(i32, -16773785)) == @bitReverse(neg32));
}
fn vector8() !void {
var v = @Vector(2, u8){ 0x12, 0x23 };
var result = @bitReverse(u8, v);
var result = @bitReverse(v);
try expect(result[0] == 0x48);
try expect(result[1] == 0xc4);
}
@ -109,7 +109,7 @@ test "bitReverse vectors u8" {
fn vector16() !void {
var v = @Vector(2, u16){ 0x1234, 0x2345 };
var result = @bitReverse(u16, v);
var result = @bitReverse(v);
try expect(result[0] == 0x2c48);
try expect(result[1] == 0xa2c4);
}
@ -128,7 +128,7 @@ test "bitReverse vectors u16" {
fn vector24() !void {
var v = @Vector(2, u24){ 0x123456, 0x234567 };
var result = @bitReverse(u24, v);
var result = @bitReverse(v);
try expect(result[0] == 0x6a2c48);
try expect(result[1] == 0xe6a2c4);
}
@ -147,7 +147,7 @@ test "bitReverse vectors u24" {
fn vector0() !void {
var v = @Vector(2, u0){ 0, 0 };
var result = @bitReverse(u0, v);
var result = @bitReverse(v);
try expect(result[0] == 0);
try expect(result[1] == 0);
}

View File

@ -2,6 +2,7 @@ const builtin = @import("builtin");
const std = @import("std");
test "uses correct LLVM builtin" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@ -12,8 +13,8 @@ test "uses correct LLVM builtin" {
var y: @Vector(4, u32) = [_]u32{ 0x1, 0x1, 0x1, 0x1 };
// The stage1 compiler used to call the same builtin function for both
// scalar and vector inputs, causing the LLVM module verification to fail.
var a = @clz(u32, x);
var b = @clz(u32, y);
var a = @clz(x);
var b = @clz(y);
try std.testing.expectEqual(@as(u6, 31), a);
try std.testing.expectEqual([_]u6{ 31, 31, 31, 31 }, b);
}

View File

@ -4,7 +4,7 @@ const expect = std.testing.expect;
const math = std.math;
fn ctz(x: anytype) usize {
return @ctz(@TypeOf(x), x);
return @ctz(x);
}
test "fixed" {

View File

@ -3,6 +3,7 @@ const builtin = @import("builtin");
const expect = std.testing.expect;
test "@byteSwap integers" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
@ -46,7 +47,7 @@ test "@byteSwap integers" {
);
}
fn t(comptime I: type, input: I, expected_output: I) !void {
try std.testing.expect(expected_output == @byteSwap(I, input));
try std.testing.expect(expected_output == @byteSwap(input));
}
};
comptime try ByteSwapIntTest.run();
@ -55,12 +56,13 @@ test "@byteSwap integers" {
fn vector8() !void {
var v = @Vector(2, u8){ 0x12, 0x13 };
var result = @byteSwap(u8, v);
var result = @byteSwap(v);
try expect(result[0] == 0x12);
try expect(result[1] == 0x13);
}
test "@byteSwap vectors u8" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@ -73,12 +75,13 @@ test "@byteSwap vectors u8" {
fn vector16() !void {
var v = @Vector(2, u16){ 0x1234, 0x2345 };
var result = @byteSwap(u16, v);
var result = @byteSwap(v);
try expect(result[0] == 0x3412);
try expect(result[1] == 0x4523);
}
test "@byteSwap vectors u16" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@ -91,12 +94,13 @@ test "@byteSwap vectors u16" {
fn vector24() !void {
var v = @Vector(2, u24){ 0x123456, 0x234567 };
var result = @byteSwap(u24, v);
var result = @byteSwap(v);
try expect(result[0] == 0x563412);
try expect(result[1] == 0x674523);
}
test "@byteSwap vectors u24" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@ -109,12 +113,13 @@ test "@byteSwap vectors u24" {
fn vector0() !void {
var v = @Vector(2, u0){ 0, 0 };
var result = @byteSwap(u0, v);
var result = @byteSwap(v);
try expect(result[0] == 0);
try expect(result[1] == 0);
}
test "@byteSwap vectors u0" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;

View File

@ -82,7 +82,7 @@ test "type pun value and struct" {
}
fn bigToNativeEndian(comptime T: type, v: T) T {
return if (endian == .Big) v else @byteSwap(T, v);
return if (endian == .Big) v else @byteSwap(v);
}
test "type pun endianness" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;

View File

@ -90,10 +90,11 @@ fn testClzBigInts() !void {
}
fn testOneClz(comptime T: type, x: T) u32 {
return @clz(T, x);
return @clz(x);
}
test "@clz vectors" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@ -120,7 +121,7 @@ fn testOneClzVector(
x: @Vector(len, T),
expected: @Vector(len, u32),
) !void {
try expectVectorsEqual(@clz(T, x), expected);
try expectVectorsEqual(@clz(x), expected);
}
fn expectVectorsEqual(a: anytype, b: anytype) !void {
@ -151,19 +152,18 @@ fn testCtz() !void {
}
fn testOneCtz(comptime T: type, x: T) u32 {
return @ctz(T, x);
return @ctz(x);
}
test "@ctz vectors" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if ((builtin.zig_backend == .stage1 or builtin.zig_backend == .stage2_llvm) and
builtin.cpu.arch == .aarch64)
{
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
// This regressed with LLVM 14:
// https://github.com/ziglang/zig/issues/12013
return error.SkipZigTest;
@ -187,7 +187,7 @@ fn testOneCtzVector(
x: @Vector(len, T),
expected: @Vector(len, u32),
) !void {
try expectVectorsEqual(@ctz(T, x), expected);
try expectVectorsEqual(@ctz(x), expected);
}
test "const number literal" {

View File

@ -18,53 +18,54 @@ test "@popCount 128bit integer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
comptime {
try expect(@popCount(u128, @as(u128, 0b11111111000110001100010000100001000011000011100101010001)) == 24);
try expect(@popCount(i128, @as(i128, 0b11111111000110001100010000100001000011000011100101010001)) == 24);
try expect(@popCount(@as(u128, 0b11111111000110001100010000100001000011000011100101010001)) == 24);
try expect(@popCount(@as(i128, 0b11111111000110001100010000100001000011000011100101010001)) == 24);
}
{
var x: u128 = 0b11111111000110001100010000100001000011000011100101010001;
try expect(@popCount(u128, x) == 24);
try expect(@popCount(x) == 24);
}
try expect(@popCount(i128, @as(i128, 0b11111111000110001100010000100001000011000011100101010001)) == 24);
try expect(@popCount(@as(i128, 0b11111111000110001100010000100001000011000011100101010001)) == 24);
}
fn testPopCountIntegers() !void {
{
var x: u32 = 0xffffffff;
try expect(@popCount(u32, x) == 32);
try expect(@popCount(x) == 32);
}
{
var x: u5 = 0x1f;
try expect(@popCount(u5, x) == 5);
try expect(@popCount(x) == 5);
}
{
var x: u32 = 0xaa;
try expect(@popCount(u32, x) == 4);
try expect(@popCount(x) == 4);
}
{
var x: u32 = 0xaaaaaaaa;
try expect(@popCount(u32, x) == 16);
try expect(@popCount(x) == 16);
}
{
var x: u32 = 0xaaaaaaaa;
try expect(@popCount(u32, x) == 16);
try expect(@popCount(x) == 16);
}
{
var x: i16 = -1;
try expect(@popCount(i16, x) == 16);
try expect(@popCount(x) == 16);
}
{
var x: i8 = -120;
try expect(@popCount(i8, x) == 2);
try expect(@popCount(x) == 2);
}
comptime {
try expect(@popCount(u8, @bitCast(u8, @as(i8, -120))) == 2);
try expect(@popCount(@bitCast(u8, @as(i8, -120))) == 2);
}
}
test "@popCount vectors" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -79,13 +80,13 @@ fn testPopCountVectors() !void {
{
var x: @Vector(8, u32) = [1]u32{0xffffffff} ** 8;
const expected = [1]u6{32} ** 8;
const result: [8]u6 = @popCount(u32, x);
const result: [8]u6 = @popCount(x);
try expect(std.mem.eql(u6, &expected, &result));
}
{
var x: @Vector(8, i16) = [1]i16{-1} ** 8;
const expected = [1]u5{16} ** 8;
const result: [8]u5 = @popCount(i16, x);
const result: [8]u5 = @popCount(x);
try expect(std.mem.eql(u5, &expected, &result));
}
}

View File

@ -1,10 +1,10 @@
pub export fn entry() void {
var arr: [100]u8 = undefined;
for (arr) |bits| _ = @popCount(bits);
for (arr) |bits| _ = @popCount(u8, bits);
}
// error
// backend=stage2
// target=native
//
// :3:26: error: expected 2 arguments, found 1
// :3:26: error: expected 1 argument, found 2

View File

@ -1,9 +1,9 @@
export fn entry(x: f32) u32 {
return @popCount(f32, x);
return @popCount(x);
}
// error
// backend=stage2
// target=native
//
// :2:27: error: expected integer or vector, found 'f32'
// :2:22: error: expected integer or vector, found 'f32'

View File

@ -299,11 +299,11 @@ fn renderBitEnum(
for (enumerants) |enumerant, i| {
if (enumerant.value != .bitflag) return error.InvalidRegistry;
const value = try parseHexInt(enumerant.value.bitflag);
if (@popCount(u32, value) == 0) {
if (@popCount(value) == 0) {
continue; // Skip 'none' items
}
std.debug.assert(@popCount(u32, value) == 1);
std.debug.assert(@popCount(value) == 1);
var bitpos = std.math.log2_int(u32, value);
if (flags_by_bitpos[bitpos]) |*existing| {

View File

@ -389,7 +389,7 @@ fn parseElf(parse: Parse, comptime is_64: bool, comptime endian: builtin.Endian)
const S = struct {
fn endianSwap(x: anytype) @TypeOf(x) {
if (endian != native_endian) {
return @byteSwap(@TypeOf(x), x);
return @byteSwap(x);
} else {
return x;
}