Random number generator fixes for Linux 6.1-rc1.

Merge tag 'random-6.1-rc1-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/crng/random

Pull more random number generator updates from Jason Donenfeld:
 "This time with some large scale treewide cleanups.

  The intent of this pull is to clean up the way callers fetch random
  integers. The current rules for doing this right are:

   - If you want a secure or an insecure random u64, use get_random_u64()

   - If you want a secure or an insecure random u32, use get_random_u32()

     The old function prandom_u32() has been deprecated for a while now
     and is just a wrapper around get_random_u32(). Same for
     get_random_int().

   - If you want a secure or an insecure random u16, use get_random_u16()

   - If you want a secure or an insecure random u8, use get_random_u8()

   - If you want secure or insecure random bytes, use get_random_bytes().

     The old function prandom_bytes() has been deprecated for a while
     now and has long been a wrapper around get_random_bytes().

   - If you want a non-uniform random u32, u16, or u8 bounded by a
     certain open interval maximum, use prandom_u32_max()

     I say "non-uniform", because it doesn't do any rejection sampling
     or divisions. Hence, it stays within the prandom_*() namespace, not
     the get_random_*() namespace.

     I'm currently investigating a "uniform" function for 6.2. We'll see
     what comes of that.

  By applying these rules uniformly, we get several benefits:

   - By using prandom_u32_max() with an upper-bound that the compiler
     can prove at compile-time is ≤65536 or ≤256, internally
     get_random_u16() or get_random_u8() is used, which wastes fewer
     batched random bytes, and hence has higher throughput.

   - By using prandom_u32_max() instead of %, when the upper-bound is
     not a constant, division is still avoided, because
     prandom_u32_max() uses a faster multiplication-based trick instead.

   - By using get_random_u16() or get_random_u8() in cases where the
     return value is intended to indeed be a u16 or a u8, we waste fewer
     batched random bytes, and hence have higher throughput.

  This series was originally done by hand while I was on an airplane
  without Internet. Later, Kees and I worked on retroactively figuring
  out what could be done with Coccinelle and what had to be done
  manually, and then we split things up based on that.

  So while this touches a lot of files, the actual amount of code that's
  hand fiddled is comfortably small"

* tag 'random-6.1-rc1-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/crng/random:
  prandom: remove unused functions
  treewide: use get_random_bytes() when possible
  treewide: use get_random_u32() when possible
  treewide: use get_random_{u8,u16}() when possible, part 2
  treewide: use get_random_{u8,u16}() when possible, part 1
  treewide: use prandom_u32_max() when possible, part 2
  treewide: use prandom_u32_max() when possible, part 1
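The selection rules in the pull message map onto code roughly as follows. This is an illustrative sketch only — the function name `example_random_usage()` and its local variables are made up — but the getters it calls (get_random_u8/u16/u32/u64(), get_random_bytes(), prandom_u32_max()) are exactly the interfaces named above, assuming 6.1-era <linux/random.h> and <linux/prandom.h>.

```c
#include <linux/random.h>
#include <linux/prandom.h>

/* Illustrative only: which helper to reach for, per the rules above. */
static void example_random_usage(void)
{
	u64 id;
	u32 cookie;
	u16 gen;
	u8 jitter;
	u8 key[16];
	u32 idx;

	id = get_random_u64();              /* need a full 64-bit value */
	cookie = get_random_u32();          /* replaces prandom_u32() / get_random_int() */
	gen = get_random_u16();             /* only 16 bits are actually consumed */
	jitter = get_random_u8();           /* only 8 bits are actually consumed */

	get_random_bytes(key, sizeof(key)); /* replaces prandom_bytes() */

	/* Bounded value in [0, 10): replaces "get_random_u32() % 10" */
	idx = prandom_u32_max(10);

	(void)id; (void)cookie; (void)gen; (void)jitter; (void)idx;
}
```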
commit f1947d7c8a
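For reference before the treewide diff below: the "faster multiplication-based trick" mentioned in the pull message scales a full-width random word into a bounded range with a widening multiply and a shift instead of a division. The sketch below is a hand-written illustration of that technique under a hypothetical name, not a copy of the kernel's prandom.h.

```c
#include <linux/random.h>

/*
 * Map a uniform 32-bit random value into [0, ceil) using a widening
 * multiply and a shift rather than a division or modulo. As the pull
 * message notes, the result is slightly non-uniform unless ceil is a
 * power of two, because no rejection sampling is performed.
 */
static inline u32 example_bounded_random(u32 ceil)
{
	return (u32)(((u64)get_random_u32() * ceil) >> 32);
}
```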
@ -305,7 +305,7 @@ Possible BPF extensions are shown in the following table:
|
||||
vlan_tci skb_vlan_tag_get(skb)
|
||||
vlan_avail skb_vlan_tag_present(skb)
|
||||
vlan_tpid skb->vlan_proto
|
||||
rand prandom_u32()
|
||||
rand get_random_u32()
|
||||
=================================== =================================================
|
||||
|
||||
These extensions can also be prefixed with '#'.
|
||||
|
@ -371,7 +371,7 @@ static unsigned long sigpage_addr(const struct mm_struct *mm,
|
||||
|
||||
slots = ((last - first) >> PAGE_SHIFT) + 1;
|
||||
|
||||
offset = get_random_int() % slots;
|
||||
offset = prandom_u32_max(slots);
|
||||
|
||||
addr = first + (offset << PAGE_SHIFT);
|
||||
|
||||
|
@ -655,7 +655,7 @@ struct page *get_signal_page(void)
|
||||
PAGE_SIZE / sizeof(u32));
|
||||
|
||||
/* Give the signal return code some randomness */
|
||||
offset = 0x200 + (get_random_int() & 0x7fc);
|
||||
offset = 0x200 + (get_random_u16() & 0x7fc);
|
||||
signal_return_offset = offset;
|
||||
|
||||
/* Copy signal return handlers into the page */
|
||||
|
@ -591,7 +591,7 @@ unsigned long __get_wchan(struct task_struct *p)
|
||||
unsigned long arch_align_stack(unsigned long sp)
|
||||
{
|
||||
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
|
||||
sp -= get_random_int() & ~PAGE_MASK;
|
||||
sp -= prandom_u32_max(PAGE_SIZE);
|
||||
return sp & ~0xf;
|
||||
}
|
||||
|
||||
|
@ -67,7 +67,7 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
|
||||
*
|
||||
* The resulting 5 bits of entropy is seen in SP[8:4].
|
||||
*/
|
||||
choose_random_kstack_offset(get_random_int() & 0x1FF);
|
||||
choose_random_kstack_offset(get_random_u16() & 0x1FF);
|
||||
}
|
||||
|
||||
static inline bool has_syscall_work(unsigned long flags)
|
||||
|
@ -293,7 +293,7 @@ unsigned long stack_top(void)
|
||||
unsigned long arch_align_stack(unsigned long sp)
|
||||
{
|
||||
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
|
||||
sp -= get_random_int() & ~PAGE_MASK;
|
||||
sp -= prandom_u32_max(PAGE_SIZE);
|
||||
|
||||
return sp & STACK_ALIGN;
|
||||
}
|
||||
|
@ -78,7 +78,7 @@ static unsigned long vdso_base(void)
|
||||
unsigned long base = STACK_TOP;
|
||||
|
||||
if (current->flags & PF_RANDOMIZE) {
|
||||
base += get_random_int() & (VDSO_RANDOMIZE_SIZE - 1);
|
||||
base += prandom_u32_max(VDSO_RANDOMIZE_SIZE);
|
||||
base = PAGE_ALIGN(base);
|
||||
}
|
||||
|
||||
|
@ -711,7 +711,7 @@ unsigned long mips_stack_top(void)
|
||||
unsigned long arch_align_stack(unsigned long sp)
|
||||
{
|
||||
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
|
||||
sp -= get_random_int() & ~PAGE_MASK;
|
||||
sp -= prandom_u32_max(PAGE_SIZE);
|
||||
|
||||
return sp & ALMASK;
|
||||
}
|
||||
|
@ -79,7 +79,7 @@ static unsigned long vdso_base(void)
|
||||
}
|
||||
|
||||
if (current->flags & PF_RANDOMIZE) {
|
||||
base += get_random_int() & (VDSO_RANDOMIZE_SIZE - 1);
|
||||
base += prandom_u32_max(VDSO_RANDOMIZE_SIZE);
|
||||
base = PAGE_ALIGN(base);
|
||||
}
|
||||
|
||||
|
@ -284,7 +284,7 @@ __get_wchan(struct task_struct *p)
|
||||
|
||||
static inline unsigned long brk_rnd(void)
|
||||
{
|
||||
return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
|
||||
return (get_random_u32() & BRK_RND_MASK) << PAGE_SHIFT;
|
||||
}
|
||||
|
||||
unsigned long arch_randomize_brk(struct mm_struct *mm)
|
||||
|
@ -239,14 +239,14 @@ static unsigned long mmap_rnd(void)
|
||||
unsigned long rnd = 0;
|
||||
|
||||
if (current->flags & PF_RANDOMIZE)
|
||||
rnd = get_random_int() & MMAP_RND_MASK;
|
||||
rnd = get_random_u32() & MMAP_RND_MASK;
|
||||
|
||||
return rnd << PAGE_SHIFT;
|
||||
}
|
||||
|
||||
unsigned long arch_mmap_rnd(void)
|
||||
{
|
||||
return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
|
||||
return (get_random_u32() & MMAP_RND_MASK) << PAGE_SHIFT;
|
||||
}
|
||||
|
||||
static unsigned long mmap_legacy_base(void)
|
||||
|
@ -75,7 +75,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
|
||||
|
||||
map_base = mm->mmap_base;
|
||||
if (current->flags & PF_RANDOMIZE)
|
||||
map_base -= (get_random_int() & 0x1f) * PAGE_SIZE;
|
||||
map_base -= prandom_u32_max(0x20) * PAGE_SIZE;
|
||||
|
||||
vdso_text_start = get_unmapped_area(NULL, map_base, vdso_text_len, 0, 0);
|
||||
|
||||
|
@ -82,7 +82,7 @@ static int __init crc_test_init(void)
|
||||
|
||||
if (len <= offset)
|
||||
continue;
|
||||
prandom_bytes(data, len);
|
||||
get_random_bytes(data, len);
|
||||
len -= offset;
|
||||
|
||||
crypto_shash_update(crct10dif_shash, data+offset, len);
|
||||
|
@ -2303,6 +2303,6 @@ void notrace __ppc64_runlatch_off(void)
|
||||
unsigned long arch_align_stack(unsigned long sp)
|
||||
{
|
||||
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
|
||||
sp -= get_random_int() & ~PAGE_MASK;
|
||||
sp -= prandom_u32_max(PAGE_SIZE);
|
||||
return sp & ~0xf;
|
||||
}
|
||||
|
@ -224,13 +224,13 @@ unsigned long __get_wchan(struct task_struct *p)
|
||||
unsigned long arch_align_stack(unsigned long sp)
|
||||
{
|
||||
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
|
||||
sp -= get_random_int() & ~PAGE_MASK;
|
||||
sp -= prandom_u32_max(PAGE_SIZE);
|
||||
return sp & ~0xf;
|
||||
}
|
||||
|
||||
static inline unsigned long brk_rnd(void)
|
||||
{
|
||||
return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
|
||||
return (get_random_u16() & BRK_RND_MASK) << PAGE_SHIFT;
|
||||
}
|
||||
|
||||
unsigned long arch_randomize_brk(struct mm_struct *mm)
|
||||
|
@ -227,7 +227,7 @@ static unsigned long vdso_addr(unsigned long start, unsigned long len)
|
||||
end -= len;
|
||||
|
||||
if (end > start) {
|
||||
offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
|
||||
offset = prandom_u32_max(((end - start) >> PAGE_SHIFT) + 1);
|
||||
addr = start + (offset << PAGE_SHIFT);
|
||||
} else {
|
||||
addr = start;
|
||||
|
@ -37,7 +37,7 @@ static inline int mmap_is_legacy(struct rlimit *rlim_stack)
|
||||
|
||||
unsigned long arch_mmap_rnd(void)
|
||||
{
|
||||
return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
|
||||
return (get_random_u32() & MMAP_RND_MASK) << PAGE_SHIFT;
|
||||
}
|
||||
|
||||
static unsigned long mmap_base_legacy(unsigned long rnd)
|
||||
|
@ -354,7 +354,7 @@ static unsigned long vdso_addr(unsigned long start, unsigned int len)
|
||||
unsigned int offset;
|
||||
|
||||
/* This loses some more bits than a modulo, but is cheaper */
|
||||
offset = get_random_int() & (PTRS_PER_PTE - 1);
|
||||
offset = prandom_u32_max(PTRS_PER_PTE);
|
||||
return start + (offset << PAGE_SHIFT);
|
||||
}
|
||||
|
||||
|
@ -356,7 +356,7 @@ int singlestepping(void * t)
|
||||
unsigned long arch_align_stack(unsigned long sp)
|
||||
{
|
||||
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
|
||||
sp -= get_random_int() % 8192;
|
||||
sp -= prandom_u32_max(8192);
|
||||
return sp & ~0xf;
|
||||
}
|
||||
#endif
|
||||
|
@ -327,7 +327,7 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
|
||||
end -= len;
|
||||
|
||||
if (end > start) {
|
||||
offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
|
||||
offset = prandom_u32_max(((end - start) >> PAGE_SHIFT) + 1);
|
||||
addr = start + (offset << PAGE_SHIFT);
|
||||
} else {
|
||||
addr = start;
|
||||
|
@ -503,7 +503,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
|
||||
va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
|
||||
|
||||
/* A random value per boot for bit slice [12:upper_bit) */
|
||||
va_align.bits = get_random_int() & va_align.mask;
|
||||
va_align.bits = get_random_u32() & va_align.mask;
|
||||
}
|
||||
|
||||
if (cpu_has(c, X86_FEATURE_MWAITX))
|
||||
|
@ -53,7 +53,7 @@ static unsigned long int get_module_load_offset(void)
|
||||
*/
|
||||
if (module_load_offset == 0)
|
||||
module_load_offset =
|
||||
(get_random_int() % 1024 + 1) * PAGE_SIZE;
|
||||
(prandom_u32_max(1024) + 1) * PAGE_SIZE;
|
||||
mutex_unlock(&module_kaslr_mutex);
|
||||
}
|
||||
return module_load_offset;
|
||||
|
@ -965,7 +965,7 @@ early_param("idle", idle_setup);
|
||||
unsigned long arch_align_stack(unsigned long sp)
|
||||
{
|
||||
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
|
||||
sp -= get_random_int() % 8192;
|
||||
sp -= prandom_u32_max(8192);
|
||||
return sp & ~0xf;
|
||||
}
|
||||
|
||||
|
@ -136,10 +136,10 @@ static int pageattr_test(void)
|
||||
failed += print_split(&sa);
|
||||
|
||||
for (i = 0; i < NTEST; i++) {
|
||||
unsigned long pfn = prandom_u32() % max_pfn_mapped;
|
||||
unsigned long pfn = prandom_u32_max(max_pfn_mapped);
|
||||
|
||||
addr[i] = (unsigned long)__va(pfn << PAGE_SHIFT);
|
||||
len[i] = prandom_u32() % NPAGES;
|
||||
len[i] = prandom_u32_max(NPAGES);
|
||||
len[i] = min_t(unsigned long, len[i], max_pfn_mapped - pfn - 1);
|
||||
|
||||
if (len[i] == 0)
|
||||
|
@ -539,7 +539,7 @@ static int blk_crypto_fallback_init(void)
|
||||
if (blk_crypto_fallback_inited)
|
||||
return 0;
|
||||
|
||||
prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);
|
||||
get_random_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);
|
||||
|
||||
err = bioset_init(&crypto_bio_split, 64, 0, 0);
|
||||
if (err)
|
||||
|
@ -37,7 +37,7 @@ static void makedata(int disks)
|
||||
int i;
|
||||
|
||||
for (i = 0; i < disks; i++) {
|
||||
prandom_bytes(page_address(data[i]), PAGE_SIZE);
|
||||
get_random_bytes(page_address(data[i]), PAGE_SIZE);
|
||||
dataptrs[i] = data[i];
|
||||
dataoffs[i] = 0;
|
||||
}
|
||||
|
@ -855,9 +855,9 @@ static int prepare_keybuf(const u8 *key, unsigned int ksize,
|
||||
/* Generate a random length in range [0, max_len], but prefer smaller values */
|
||||
static unsigned int generate_random_length(unsigned int max_len)
|
||||
{
|
||||
unsigned int len = prandom_u32() % (max_len + 1);
|
||||
unsigned int len = prandom_u32_max(max_len + 1);
|
||||
|
||||
switch (prandom_u32() % 4) {
|
||||
switch (prandom_u32_max(4)) {
|
||||
case 0:
|
||||
return len % 64;
|
||||
case 1:
|
||||
@ -874,14 +874,14 @@ static void flip_random_bit(u8 *buf, size_t size)
|
||||
{
|
||||
size_t bitpos;
|
||||
|
||||
bitpos = prandom_u32() % (size * 8);
|
||||
bitpos = prandom_u32_max(size * 8);
|
||||
buf[bitpos / 8] ^= 1 << (bitpos % 8);
|
||||
}
|
||||
|
||||
/* Flip a random byte in the given nonempty data buffer */
|
||||
static void flip_random_byte(u8 *buf, size_t size)
|
||||
{
|
||||
buf[prandom_u32() % size] ^= 0xff;
|
||||
buf[prandom_u32_max(size)] ^= 0xff;
|
||||
}
|
||||
|
||||
/* Sometimes make some random changes to the given nonempty data buffer */
|
||||
@ -891,15 +891,15 @@ static void mutate_buffer(u8 *buf, size_t size)
|
||||
size_t i;
|
||||
|
||||
/* Sometimes flip some bits */
|
||||
if (prandom_u32() % 4 == 0) {
|
||||
num_flips = min_t(size_t, 1 << (prandom_u32() % 8), size * 8);
|
||||
if (prandom_u32_max(4) == 0) {
|
||||
num_flips = min_t(size_t, 1 << prandom_u32_max(8), size * 8);
|
||||
for (i = 0; i < num_flips; i++)
|
||||
flip_random_bit(buf, size);
|
||||
}
|
||||
|
||||
/* Sometimes flip some bytes */
|
||||
if (prandom_u32() % 4 == 0) {
|
||||
num_flips = min_t(size_t, 1 << (prandom_u32() % 8), size);
|
||||
if (prandom_u32_max(4) == 0) {
|
||||
num_flips = min_t(size_t, 1 << prandom_u32_max(8), size);
|
||||
for (i = 0; i < num_flips; i++)
|
||||
flip_random_byte(buf, size);
|
||||
}
|
||||
@ -915,11 +915,11 @@ static void generate_random_bytes(u8 *buf, size_t count)
|
||||
if (count == 0)
|
||||
return;
|
||||
|
||||
switch (prandom_u32() % 8) { /* Choose a generation strategy */
|
||||
switch (prandom_u32_max(8)) { /* Choose a generation strategy */
|
||||
case 0:
|
||||
case 1:
|
||||
/* All the same byte, plus optional mutations */
|
||||
switch (prandom_u32() % 4) {
|
||||
switch (prandom_u32_max(4)) {
|
||||
case 0:
|
||||
b = 0x00;
|
||||
break;
|
||||
@ -927,7 +927,7 @@ static void generate_random_bytes(u8 *buf, size_t count)
|
||||
b = 0xff;
|
||||
break;
|
||||
default:
|
||||
b = (u8)prandom_u32();
|
||||
b = get_random_u8();
|
||||
break;
|
||||
}
|
||||
memset(buf, b, count);
|
||||
@ -935,8 +935,8 @@ static void generate_random_bytes(u8 *buf, size_t count)
|
||||
break;
|
||||
case 2:
|
||||
/* Ascending or descending bytes, plus optional mutations */
|
||||
increment = (u8)prandom_u32();
|
||||
b = (u8)prandom_u32();
|
||||
increment = get_random_u8();
|
||||
b = get_random_u8();
|
||||
for (i = 0; i < count; i++, b += increment)
|
||||
buf[i] = b;
|
||||
mutate_buffer(buf, count);
|
||||
@ -944,7 +944,7 @@ static void generate_random_bytes(u8 *buf, size_t count)
|
||||
default:
|
||||
/* Fully random bytes */
|
||||
for (i = 0; i < count; i++)
|
||||
buf[i] = (u8)prandom_u32();
|
||||
buf[i] = get_random_u8();
|
||||
}
|
||||
}
|
||||
|
||||
@ -959,24 +959,24 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
|
||||
unsigned int this_len;
|
||||
const char *flushtype_str;
|
||||
|
||||
if (div == &divs[max_divs - 1] || prandom_u32() % 2 == 0)
|
||||
if (div == &divs[max_divs - 1] || prandom_u32_max(2) == 0)
|
||||
this_len = remaining;
|
||||
else
|
||||
this_len = 1 + (prandom_u32() % remaining);
|
||||
this_len = 1 + prandom_u32_max(remaining);
|
||||
div->proportion_of_total = this_len;
|
||||
|
||||
if (prandom_u32() % 4 == 0)
|
||||
div->offset = (PAGE_SIZE - 128) + (prandom_u32() % 128);
|
||||
else if (prandom_u32() % 2 == 0)
|
||||
div->offset = prandom_u32() % 32;
|
||||
if (prandom_u32_max(4) == 0)
|
||||
div->offset = (PAGE_SIZE - 128) + prandom_u32_max(128);
|
||||
else if (prandom_u32_max(2) == 0)
|
||||
div->offset = prandom_u32_max(32);
|
||||
else
|
||||
div->offset = prandom_u32() % PAGE_SIZE;
|
||||
if (prandom_u32() % 8 == 0)
|
||||
div->offset = prandom_u32_max(PAGE_SIZE);
|
||||
if (prandom_u32_max(8) == 0)
|
||||
div->offset_relative_to_alignmask = true;
|
||||
|
||||
div->flush_type = FLUSH_TYPE_NONE;
|
||||
if (gen_flushes) {
|
||||
switch (prandom_u32() % 4) {
|
||||
switch (prandom_u32_max(4)) {
|
||||
case 0:
|
||||
div->flush_type = FLUSH_TYPE_REIMPORT;
|
||||
break;
|
||||
@ -988,7 +988,7 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
|
||||
|
||||
if (div->flush_type != FLUSH_TYPE_NONE &&
|
||||
!(req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
|
||||
prandom_u32() % 2 == 0)
|
||||
prandom_u32_max(2) == 0)
|
||||
div->nosimd = true;
|
||||
|
||||
switch (div->flush_type) {
|
||||
@ -1035,7 +1035,7 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
|
||||
|
||||
p += scnprintf(p, end - p, "random:");
|
||||
|
||||
switch (prandom_u32() % 4) {
|
||||
switch (prandom_u32_max(4)) {
|
||||
case 0:
|
||||
case 1:
|
||||
cfg->inplace_mode = OUT_OF_PLACE;
|
||||
@ -1050,12 +1050,12 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
|
||||
break;
|
||||
}
|
||||
|
||||
if (prandom_u32() % 2 == 0) {
|
||||
if (prandom_u32_max(2) == 0) {
|
||||
cfg->req_flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
p += scnprintf(p, end - p, " may_sleep");
|
||||
}
|
||||
|
||||
switch (prandom_u32() % 4) {
|
||||
switch (prandom_u32_max(4)) {
|
||||
case 0:
|
||||
cfg->finalization_type = FINALIZATION_TYPE_FINAL;
|
||||
p += scnprintf(p, end - p, " use_final");
|
||||
@ -1071,7 +1071,7 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
|
||||
}
|
||||
|
||||
if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
|
||||
prandom_u32() % 2 == 0) {
|
||||
prandom_u32_max(2) == 0) {
|
||||
cfg->nosimd = true;
|
||||
p += scnprintf(p, end - p, " nosimd");
|
||||
}
|
||||
@ -1084,7 +1084,7 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
|
||||
cfg->req_flags);
|
||||
p += scnprintf(p, end - p, "]");
|
||||
|
||||
if (cfg->inplace_mode == OUT_OF_PLACE && prandom_u32() % 2 == 0) {
|
||||
if (cfg->inplace_mode == OUT_OF_PLACE && prandom_u32_max(2) == 0) {
|
||||
p += scnprintf(p, end - p, " dst_divs=[");
|
||||
p = generate_random_sgl_divisions(cfg->dst_divs,
|
||||
ARRAY_SIZE(cfg->dst_divs),
|
||||
@ -1093,13 +1093,13 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
|
||||
p += scnprintf(p, end - p, "]");
|
||||
}
|
||||
|
||||
if (prandom_u32() % 2 == 0) {
|
||||
cfg->iv_offset = 1 + (prandom_u32() % MAX_ALGAPI_ALIGNMASK);
|
||||
if (prandom_u32_max(2) == 0) {
|
||||
cfg->iv_offset = 1 + prandom_u32_max(MAX_ALGAPI_ALIGNMASK);
|
||||
p += scnprintf(p, end - p, " iv_offset=%u", cfg->iv_offset);
|
||||
}
|
||||
|
||||
if (prandom_u32() % 2 == 0) {
|
||||
cfg->key_offset = 1 + (prandom_u32() % MAX_ALGAPI_ALIGNMASK);
|
||||
if (prandom_u32_max(2) == 0) {
|
||||
cfg->key_offset = 1 + prandom_u32_max(MAX_ALGAPI_ALIGNMASK);
|
||||
p += scnprintf(p, end - p, " key_offset=%u", cfg->key_offset);
|
||||
}
|
||||
|
||||
@ -1652,8 +1652,8 @@ static void generate_random_hash_testvec(struct shash_desc *desc,
|
||||
vec->ksize = 0;
|
||||
if (maxkeysize) {
|
||||
vec->ksize = maxkeysize;
|
||||
if (prandom_u32() % 4 == 0)
|
||||
vec->ksize = 1 + (prandom_u32() % maxkeysize);
|
||||
if (prandom_u32_max(4) == 0)
|
||||
vec->ksize = 1 + prandom_u32_max(maxkeysize);
|
||||
generate_random_bytes((u8 *)vec->key, vec->ksize);
|
||||
|
||||
vec->setkey_error = crypto_shash_setkey(desc->tfm, vec->key,
|
||||
@ -2218,13 +2218,13 @@ static void mutate_aead_message(struct aead_testvec *vec, bool aad_iv,
|
||||
const unsigned int aad_tail_size = aad_iv ? ivsize : 0;
|
||||
const unsigned int authsize = vec->clen - vec->plen;
|
||||
|
||||
if (prandom_u32() % 2 == 0 && vec->alen > aad_tail_size) {
|
||||
if (prandom_u32_max(2) == 0 && vec->alen > aad_tail_size) {
|
||||
/* Mutate the AAD */
|
||||
flip_random_bit((u8 *)vec->assoc, vec->alen - aad_tail_size);
|
||||
if (prandom_u32() % 2 == 0)
|
||||
if (prandom_u32_max(2) == 0)
|
||||
return;
|
||||
}
|
||||
if (prandom_u32() % 2 == 0) {
|
||||
if (prandom_u32_max(2) == 0) {
|
||||
/* Mutate auth tag (assuming it's at the end of ciphertext) */
|
||||
flip_random_bit((u8 *)vec->ctext + vec->plen, authsize);
|
||||
} else {
|
||||
@ -2249,7 +2249,7 @@ static void generate_aead_message(struct aead_request *req,
|
||||
const unsigned int ivsize = crypto_aead_ivsize(tfm);
|
||||
const unsigned int authsize = vec->clen - vec->plen;
|
||||
const bool inauthentic = (authsize >= MIN_COLLISION_FREE_AUTHSIZE) &&
|
||||
(prefer_inauthentic || prandom_u32() % 4 == 0);
|
||||
(prefer_inauthentic || prandom_u32_max(4) == 0);
|
||||
|
||||
/* Generate the AAD. */
|
||||
generate_random_bytes((u8 *)vec->assoc, vec->alen);
|
||||
@ -2257,7 +2257,7 @@ static void generate_aead_message(struct aead_request *req,
|
||||
/* Avoid implementation-defined behavior. */
|
||||
memcpy((u8 *)vec->assoc + vec->alen - ivsize, vec->iv, ivsize);
|
||||
|
||||
if (inauthentic && prandom_u32() % 2 == 0) {
|
||||
if (inauthentic && prandom_u32_max(2) == 0) {
|
||||
/* Generate a random ciphertext. */
|
||||
generate_random_bytes((u8 *)vec->ctext, vec->clen);
|
||||
} else {
|
||||
@ -2321,8 +2321,8 @@ static void generate_random_aead_testvec(struct aead_request *req,
|
||||
|
||||
/* Key: length in [0, maxkeysize], but usually choose maxkeysize */
|
||||
vec->klen = maxkeysize;
|
||||
if (prandom_u32() % 4 == 0)
|
||||
vec->klen = prandom_u32() % (maxkeysize + 1);
|
||||
if (prandom_u32_max(4) == 0)
|
||||
vec->klen = prandom_u32_max(maxkeysize + 1);
|
||||
generate_random_bytes((u8 *)vec->key, vec->klen);
|
||||
vec->setkey_error = crypto_aead_setkey(tfm, vec->key, vec->klen);
|
||||
|
||||
@ -2331,8 +2331,8 @@ static void generate_random_aead_testvec(struct aead_request *req,
|
||||
|
||||
/* Tag length: in [0, maxauthsize], but usually choose maxauthsize */
|
||||
authsize = maxauthsize;
|
||||
if (prandom_u32() % 4 == 0)
|
||||
authsize = prandom_u32() % (maxauthsize + 1);
|
||||
if (prandom_u32_max(4) == 0)
|
||||
authsize = prandom_u32_max(maxauthsize + 1);
|
||||
if (prefer_inauthentic && authsize < MIN_COLLISION_FREE_AUTHSIZE)
|
||||
authsize = MIN_COLLISION_FREE_AUTHSIZE;
|
||||
if (WARN_ON(authsize > maxdatasize))
|
||||
@ -2342,7 +2342,7 @@ static void generate_random_aead_testvec(struct aead_request *req,
|
||||
|
||||
/* AAD, plaintext, and ciphertext lengths */
|
||||
total_len = generate_random_length(maxdatasize);
|
||||
if (prandom_u32() % 4 == 0)
|
||||
if (prandom_u32_max(4) == 0)
|
||||
vec->alen = 0;
|
||||
else
|
||||
vec->alen = generate_random_length(total_len);
|
||||
@ -2958,8 +2958,8 @@ static void generate_random_cipher_testvec(struct skcipher_request *req,
|
||||
|
||||
/* Key: length in [0, maxkeysize], but usually choose maxkeysize */
|
||||
vec->klen = maxkeysize;
|
||||
if (prandom_u32() % 4 == 0)
|
||||
vec->klen = prandom_u32() % (maxkeysize + 1);
|
||||
if (prandom_u32_max(4) == 0)
|
||||
vec->klen = prandom_u32_max(maxkeysize + 1);
|
||||
generate_random_bytes((u8 *)vec->key, vec->klen);
|
||||
vec->setkey_error = crypto_skcipher_setkey(tfm, vec->key, vec->klen);
|
||||
|
||||
|
@ -781,7 +781,7 @@ static struct socket *drbd_wait_for_connect(struct drbd_connection *connection,
|
||||
|
||||
timeo = connect_int * HZ;
|
||||
/* 28.5% random jitter */
|
||||
timeo += (prandom_u32() & 1) ? timeo / 7 : -timeo / 7;
|
||||
timeo += prandom_u32_max(2) ? timeo / 7 : -timeo / 7;
|
||||
|
||||
err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
|
||||
if (err <= 0)
|
||||
@ -1004,7 +1004,7 @@ retry:
|
||||
drbd_warn(connection, "Error receiving initial packet\n");
|
||||
sock_release(s);
|
||||
randomize:
|
||||
if (prandom_u32() & 1)
|
||||
if (prandom_u32_max(2))
|
||||
goto retry;
|
||||
}
|
||||
}
|
||||
|
@ -97,7 +97,7 @@ MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
|
||||
* Returns whether or not the input pool has been seeded and thus guaranteed
|
||||
* to supply cryptographically secure random numbers. This applies to: the
|
||||
* /dev/urandom device, the get_random_bytes function, and the get_random_{u8,
|
||||
* u16,u32,u64,int,long} family of functions.
|
||||
* u16,u32,u64,long} family of functions.
|
||||
*
|
||||
* Returns: true if the input pool has been seeded.
|
||||
* false if the input pool has not been seeded.
|
||||
@ -161,15 +161,14 @@ EXPORT_SYMBOL(wait_for_random_bytes);
|
||||
* u16 get_random_u16()
|
||||
* u32 get_random_u32()
|
||||
* u64 get_random_u64()
|
||||
* unsigned int get_random_int()
|
||||
* unsigned long get_random_long()
|
||||
*
|
||||
* These interfaces will return the requested number of random bytes
|
||||
* into the given buffer or as a return value. This is equivalent to
|
||||
* a read from /dev/urandom. The u8, u16, u32, u64, int, and long
|
||||
* family of functions may be higher performance for one-off random
|
||||
* integers, because they do a bit of buffering and do not invoke
|
||||
* reseeding until the buffer is emptied.
|
||||
* a read from /dev/urandom. The u8, u16, u32, u64, long family of
|
||||
* functions may be higher performance for one-off random integers,
|
||||
* because they do a bit of buffering and do not invoke reseeding
|
||||
* until the buffer is emptied.
|
||||
*
|
||||
*********************************************************************/
|
||||
|
||||
|
@ -312,7 +312,7 @@ static unsigned long dmatest_random(void)
|
||||
{
|
||||
unsigned long buf;
|
||||
|
||||
prandom_bytes(&buf, sizeof(buf));
|
||||
get_random_bytes(&buf, sizeof(buf));
|
||||
return buf;
|
||||
}
|
||||
|
||||
|
@ -2424,7 +2424,7 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
|
||||
/* Check whether the file_priv has already selected one ring. */
|
||||
if ((int)file_priv->bsd_engine < 0)
|
||||
file_priv->bsd_engine =
|
||||
get_random_int() % num_vcs_engines(dev_priv);
|
||||
prandom_u32_max(num_vcs_engines(dev_priv));
|
||||
|
||||
return file_priv->bsd_engine;
|
||||
}
|
||||
|
@ -137,12 +137,12 @@ static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
|
||||
range = round_down(end - len, align) - round_up(start, align);
|
||||
if (range) {
|
||||
if (sizeof(unsigned long) == sizeof(u64)) {
|
||||
addr = get_random_long();
|
||||
addr = get_random_u64();
|
||||
} else {
|
||||
addr = get_random_int();
|
||||
addr = get_random_u32();
|
||||
if (range > U32_MAX) {
|
||||
addr <<= 32;
|
||||
addr |= get_random_int();
|
||||
addr |= get_random_u32();
|
||||
}
|
||||
}
|
||||
div64_u64_rem(addr, range, &addr);
|
||||
|
@ -135,7 +135,7 @@ static int __run_selftests(const char *name,
|
||||
int err = 0;
|
||||
|
||||
while (!i915_selftest.random_seed)
|
||||
i915_selftest.random_seed = get_random_int();
|
||||
i915_selftest.random_seed = get_random_u32();
|
||||
|
||||
i915_selftest.timeout_jiffies =
|
||||
i915_selftest.timeout_ms ?
|
||||
|
@ -729,7 +729,7 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
|
||||
static int drm_buddy_init_test(struct kunit *test)
|
||||
{
|
||||
while (!random_seed)
|
||||
random_seed = get_random_int();
|
||||
random_seed = get_random_u32();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -2212,7 +2212,7 @@ err_nodes:
|
||||
static int drm_mm_init_test(struct kunit *test)
|
||||
{
|
||||
while (!random_seed)
|
||||
random_seed = get_random_int();
|
||||
random_seed = get_random_u32();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -3807,7 +3807,7 @@ static int cma_alloc_any_port(enum rdma_ucm_port_space ps,
|
||||
|
||||
inet_get_local_port_range(net, &low, &high);
|
||||
remaining = (high - low) + 1;
|
||||
rover = prandom_u32() % remaining + low;
|
||||
rover = prandom_u32_max(remaining) + low;
|
||||
retry:
|
||||
if (last_used_port != rover) {
|
||||
struct rdma_bind_list *bind_list;
|
||||
|
@ -734,7 +734,7 @@ static int send_connect(struct c4iw_ep *ep)
|
||||
&ep->com.remote_addr;
|
||||
int ret;
|
||||
enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
|
||||
u32 isn = (prandom_u32() & ~7UL) - 1;
|
||||
u32 isn = (get_random_u32() & ~7UL) - 1;
|
||||
struct net_device *netdev;
|
||||
u64 params;
|
||||
|
||||
@ -2469,7 +2469,7 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
|
||||
}
|
||||
|
||||
if (!is_t4(adapter_type)) {
|
||||
u32 isn = (prandom_u32() & ~7UL) - 1;
|
||||
u32 isn = (get_random_u32() & ~7UL) - 1;
|
||||
|
||||
skb = get_skb(skb, roundup(sizeof(*rpl5), 16), GFP_KERNEL);
|
||||
rpl5 = __skb_put_zero(skb, roundup(sizeof(*rpl5), 16));
|
||||
|
@ -54,7 +54,7 @@ u32 c4iw_id_alloc(struct c4iw_id_table *alloc)
|
||||
|
||||
if (obj < alloc->max) {
|
||||
if (alloc->flags & C4IW_ID_TABLE_F_RANDOM)
|
||||
alloc->last += prandom_u32() % RANDOM_SKIP;
|
||||
alloc->last += prandom_u32_max(RANDOM_SKIP);
|
||||
else
|
||||
alloc->last = obj + 1;
|
||||
if (alloc->last >= alloc->max)
|
||||
@ -85,7 +85,7 @@ int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
|
||||
alloc->start = start;
|
||||
alloc->flags = flags;
|
||||
if (flags & C4IW_ID_TABLE_F_RANDOM)
|
||||
alloc->last = prandom_u32() % RANDOM_SKIP;
|
||||
alloc->last = prandom_u32_max(RANDOM_SKIP);
|
||||
else
|
||||
alloc->last = 0;
|
||||
alloc->max = num;
|
||||
|
@ -850,7 +850,7 @@ void hfi1_kern_init_ctxt_generations(struct hfi1_ctxtdata *rcd)
|
||||
int i;
|
||||
|
||||
for (i = 0; i < RXE_NUM_TID_FLOWS; i++) {
|
||||
rcd->flows[i].generation = mask_generation(prandom_u32());
|
||||
rcd->flows[i].generation = mask_generation(get_random_u32());
|
||||
kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, i);
|
||||
}
|
||||
}
|
||||
|
@ -41,9 +41,8 @@ static inline u16 get_ah_udp_sport(const struct rdma_ah_attr *ah_attr)
|
||||
u16 sport;
|
||||
|
||||
if (!fl)
|
||||
sport = get_random_u32() %
|
||||
(IB_ROCE_UDP_ENCAP_VALID_PORT_MAX + 1 -
|
||||
IB_ROCE_UDP_ENCAP_VALID_PORT_MIN) +
|
||||
sport = prandom_u32_max(IB_ROCE_UDP_ENCAP_VALID_PORT_MAX + 1 -
|
||||
IB_ROCE_UDP_ENCAP_VALID_PORT_MIN) +
|
||||
IB_ROCE_UDP_ENCAP_VALID_PORT_MIN;
|
||||
else
|
||||
sport = rdma_flow_label_to_udp_sport(fl);
|
||||
|
@ -96,7 +96,7 @@ static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
|
||||
__be64 mlx4_ib_gen_node_guid(void)
|
||||
{
|
||||
#define NODE_GUID_HI ((u64) (((u64)IB_OPENIB_OUI) << 40))
|
||||
return cpu_to_be64(NODE_GUID_HI | prandom_u32());
|
||||
return cpu_to_be64(NODE_GUID_HI | get_random_u32());
|
||||
}
|
||||
|
||||
__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
|
||||
|
@ -465,7 +465,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id,
|
||||
goto err_qp;
|
||||
}
|
||||
|
||||
psn = prandom_u32() & 0xffffff;
|
||||
psn = get_random_u32() & 0xffffff;
|
||||
ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
|
||||
if (ret)
|
||||
goto err_modify;
|
||||
|
@ -1517,8 +1517,7 @@ static void rtrs_clt_err_recovery_work(struct work_struct *work)
|
||||
rtrs_clt_stop_and_destroy_conns(clt_path);
|
||||
queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork,
|
||||
msecs_to_jiffies(delay_ms +
|
||||
prandom_u32() %
|
||||
RTRS_RECONNECT_SEED));
|
||||
prandom_u32_max(RTRS_RECONNECT_SEED)));
|
||||
}
|
||||
|
||||
static struct rtrs_clt_path *alloc_path(struct rtrs_clt_sess *clt,
|
||||
|
@ -401,7 +401,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
|
||||
}
|
||||
|
||||
if (bypass_torture_test(dc)) {
|
||||
if ((get_random_int() & 3) == 3)
|
||||
if (prandom_u32_max(4) == 3)
|
||||
goto skip;
|
||||
else
|
||||
goto rescale;
|
||||
|
@ -2994,7 +2994,7 @@ static int r5l_load_log(struct r5l_log *log)
|
||||
}
|
||||
create:
|
||||
if (create_super) {
|
||||
log->last_cp_seq = prandom_u32();
|
||||
log->last_cp_seq = get_random_u32();
|
||||
cp = 0;
|
||||
r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
|
||||
/*
|
||||
|
@ -870,7 +870,7 @@ static void precalculate_color(struct tpg_data *tpg, int k)
|
||||
g = tpg_colors[col].g;
|
||||
b = tpg_colors[col].b;
|
||||
} else if (tpg->pattern == TPG_PAT_NOISE) {
|
||||
r = g = b = prandom_u32_max(256);
|
||||
r = g = b = get_random_u8();
|
||||
} else if (k == TPG_COLOR_RANDOM) {
|
||||
r = g = b = tpg->qual_offset + prandom_u32_max(196);
|
||||
} else if (k >= TPG_COLOR_RAMP) {
|
||||
|
@ -104,8 +104,8 @@ retry:
|
||||
break;
|
||||
case 2:
|
||||
rds.block |= V4L2_RDS_BLOCK_ERROR;
|
||||
rds.lsb = prandom_u32_max(256);
|
||||
rds.msb = prandom_u32_max(256);
|
||||
rds.lsb = get_random_u8();
|
||||
rds.msb = get_random_u8();
|
||||
break;
|
||||
case 3: /* Skip block altogether */
|
||||
if (i)
|
||||
|
@ -210,7 +210,7 @@ static void vivid_fill_buff_noise(__s16 *tch_buf, int size)
|
||||
|
||||
/* Fill 10% of the values within range -3 and 3, zero the others */
|
||||
for (i = 0; i < size; i++) {
|
||||
unsigned int rand = get_random_int();
|
||||
unsigned int rand = get_random_u32();
|
||||
|
||||
if (rand % 10)
|
||||
tch_buf[i] = 0;
|
||||
@ -221,7 +221,7 @@ static void vivid_fill_buff_noise(__s16 *tch_buf, int size)
|
||||
|
||||
static inline int get_random_pressure(void)
|
||||
{
|
||||
return get_random_int() % VIVID_PRESSURE_LIMIT;
|
||||
return prandom_u32_max(VIVID_PRESSURE_LIMIT);
|
||||
}
|
||||
|
||||
static void vivid_tch_buf_set(struct v4l2_pix_format *f,
|
||||
@ -272,7 +272,7 @@ void vivid_fillbuff_tch(struct vivid_dev *dev, struct vivid_buffer *buf)
|
||||
return;
|
||||
|
||||
if (test_pat_idx == 0)
|
||||
dev->tch_pat_random = get_random_int();
|
||||
dev->tch_pat_random = get_random_u32();
|
||||
rand = dev->tch_pat_random;
|
||||
|
||||
switch (test_pattern) {
|
||||
|
@ -2948,7 +2948,7 @@ static void gaudi2_user_interrupt_setup(struct hl_device *hdev)
|
||||
|
||||
static inline int gaudi2_get_non_zero_random_int(void)
|
||||
{
|
||||
int rand = get_random_int();
|
||||
int rand = get_random_u32();
|
||||
|
||||
return rand ? rand : 1;
|
||||
}
|
||||
|
@ -97,8 +97,8 @@ static void mmc_should_fail_request(struct mmc_host *host,
|
||||
!should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
|
||||
return;
|
||||
|
||||
data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
|
||||
data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
|
||||
data->error = data_errors[prandom_u32_max(ARRAY_SIZE(data_errors))];
|
||||
data->bytes_xfered = prandom_u32_max(data->bytes_xfered >> 9) << 9;
|
||||
}
|
||||
|
||||
#else /* CONFIG_FAIL_MMC_REQUEST */
|
||||
|
@ -1858,7 +1858,7 @@ static void dw_mci_start_fault_timer(struct dw_mci *host)
|
||||
* Try to inject the error at random points during the data transfer.
|
||||
*/
|
||||
hrtimer_start(&host->fault_timer,
|
||||
ms_to_ktime(prandom_u32() % 25),
|
||||
ms_to_ktime(prandom_u32_max(25)),
|
||||
HRTIMER_MODE_REL);
|
||||
}
|
||||
|
||||
|
@ -1393,7 +1393,7 @@ static int ns_do_read_error(struct nandsim *ns, int num)
|
||||
unsigned int page_no = ns->regs.row;
|
||||
|
||||
if (ns_read_error(page_no)) {
|
||||
prandom_bytes(ns->buf.byte, num);
|
||||
get_random_bytes(ns->buf.byte, num);
|
||||
NS_WARN("simulating read error in page %u\n", page_no);
|
||||
return 1;
|
||||
}
|
||||
@ -1402,12 +1402,12 @@ static int ns_do_read_error(struct nandsim *ns, int num)
|
||||
|
||||
static void ns_do_bit_flips(struct nandsim *ns, int num)
|
||||
{
|
||||
if (bitflips && prandom_u32() < (1 << 22)) {
|
||||
if (bitflips && get_random_u16() < (1 << 6)) {
|
||||
int flips = 1;
|
||||
if (bitflips > 1)
|
||||
flips = (prandom_u32() % (int) bitflips) + 1;
|
||||
flips = prandom_u32_max(bitflips) + 1;
|
||||
while (flips--) {
|
||||
int pos = prandom_u32() % (num * 8);
|
||||
int pos = prandom_u32_max(num * 8);
|
||||
ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
|
||||
NS_WARN("read_page: flipping bit %d in page %d "
|
||||
"reading from %d ecc: corrected=%u failed=%u\n",
|
||||
|
@ -47,7 +47,7 @@ struct nand_ecc_test {
|
||||
static void single_bit_error_data(void *error_data, void *correct_data,
|
||||
size_t size)
|
||||
{
|
||||
unsigned int offset = prandom_u32() % (size * BITS_PER_BYTE);
|
||||
unsigned int offset = prandom_u32_max(size * BITS_PER_BYTE);
|
||||
|
||||
memcpy(error_data, correct_data, size);
|
||||
__change_bit_le(offset, error_data);
|
||||
@ -58,9 +58,9 @@ static void double_bit_error_data(void *error_data, void *correct_data,
|
||||
{
|
||||
unsigned int offset[2];
|
||||
|
||||
offset[0] = prandom_u32() % (size * BITS_PER_BYTE);
|
||||
offset[0] = prandom_u32_max(size * BITS_PER_BYTE);
|
||||
do {
|
||||
offset[1] = prandom_u32() % (size * BITS_PER_BYTE);
|
||||
offset[1] = prandom_u32_max(size * BITS_PER_BYTE);
|
||||
} while (offset[0] == offset[1]);
|
||||
|
||||
memcpy(error_data, correct_data, size);
|
||||
@ -71,7 +71,7 @@ static void double_bit_error_data(void *error_data, void *correct_data,
|
||||
|
||||
static unsigned int random_ecc_bit(size_t size)
|
||||
{
|
||||
unsigned int offset = prandom_u32() % (3 * BITS_PER_BYTE);
|
||||
unsigned int offset = prandom_u32_max(3 * BITS_PER_BYTE);
|
||||
|
||||
if (size == 256) {
|
||||
/*
|
||||
@ -79,7 +79,7 @@ static unsigned int random_ecc_bit(size_t size)
|
||||
* and 17th bit) in ECC code for 256 byte data block
|
||||
*/
|
||||
while (offset == 16 || offset == 17)
|
||||
offset = prandom_u32() % (3 * BITS_PER_BYTE);
|
||||
offset = prandom_u32_max(3 * BITS_PER_BYTE);
|
||||
}
|
||||
|
||||
return offset;
|
||||
@ -266,7 +266,7 @@ static int nand_ecc_test_run(const size_t size)
|
||||
goto error;
|
||||
}
|
||||
|
||||
prandom_bytes(correct_data, size);
|
||||
get_random_bytes(correct_data, size);
|
||||
ecc_sw_hamming_calculate(correct_data, size, correct_ecc, sm_order);
|
||||
for (i = 0; i < ARRAY_SIZE(nand_ecc_test); i++) {
|
||||
nand_ecc_test[i].prepare(error_data, error_ecc,
|
||||
|
@ -223,7 +223,7 @@ static int __init mtd_speedtest_init(void)
|
||||
if (!iobuf)
|
||||
goto out;
|
||||
|
||||
prandom_bytes(iobuf, mtd->erasesize);
|
||||
get_random_bytes(iobuf, mtd->erasesize);
|
||||
|
||||
bbt = kzalloc(ebcnt, GFP_KERNEL);
|
||||
if (!bbt)
|
||||
|
@ -45,9 +45,8 @@ static int rand_eb(void)
|
||||
unsigned int eb;
|
||||
|
||||
again:
|
||||
eb = prandom_u32();
|
||||
/* Read or write up 2 eraseblocks at a time - hence 'ebcnt - 1' */
|
||||
eb %= (ebcnt - 1);
|
||||
eb = prandom_u32_max(ebcnt - 1);
|
||||
if (bbt[eb])
|
||||
goto again;
|
||||
return eb;
|
||||
@ -55,20 +54,12 @@ again:
|
||||
|
||||
static int rand_offs(void)
|
||||
{
|
||||
unsigned int offs;
|
||||
|
||||
offs = prandom_u32();
|
||||
offs %= bufsize;
|
||||
return offs;
|
||||
return prandom_u32_max(bufsize);
|
||||
}
|
||||
|
||||
static int rand_len(int offs)
|
||||
{
|
||||
unsigned int len;
|
||||
|
||||
len = prandom_u32();
|
||||
len %= (bufsize - offs);
|
||||
return len;
|
||||
return prandom_u32_max(bufsize - offs);
|
||||
}
|
||||
|
||||
static int do_read(void)
|
||||
@ -127,7 +118,7 @@ static int do_write(void)
|
||||
|
||||
static int do_operation(void)
|
||||
{
|
||||
if (prandom_u32() & 1)
|
||||
if (prandom_u32_max(2))
|
||||
return do_read();
|
||||
else
|
||||
return do_write();
|
||||
@ -192,7 +183,7 @@ static int __init mtd_stresstest_init(void)
|
||||
goto out;
|
||||
for (i = 0; i < ebcnt; i++)
|
||||
offsets[i] = mtd->erasesize;
|
||||
prandom_bytes(writebuf, bufsize);
|
||||
get_random_bytes(writebuf, bufsize);
|
||||
|
||||
bbt = kzalloc(ebcnt, GFP_KERNEL);
|
||||
if (!bbt)
|
||||
|
@ -590,7 +590,7 @@ int ubi_dbg_power_cut(struct ubi_device *ubi, int caller)
|
||||
|
||||
if (ubi->dbg.power_cut_max > ubi->dbg.power_cut_min) {
|
||||
range = ubi->dbg.power_cut_max - ubi->dbg.power_cut_min;
|
||||
ubi->dbg.power_cut_counter += prandom_u32() % range;
|
||||
ubi->dbg.power_cut_counter += prandom_u32_max(range);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -73,7 +73,7 @@ static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
|
||||
static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
|
||||
{
|
||||
if (ubi->dbg.emulate_bitflips)
|
||||
return !(prandom_u32() % 200);
|
||||
return !prandom_u32_max(200);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -87,7 +87,7 @@ static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
|
||||
static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
|
||||
{
|
||||
if (ubi->dbg.emulate_io_failures)
|
||||
return !(prandom_u32() % 500);
|
||||
return !prandom_u32_max(500);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -101,7 +101,7 @@ static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
|
||||
static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
|
||||
{
|
||||
if (ubi->dbg.emulate_io_failures)
|
||||
return !(prandom_u32() % 400);
|
||||
return !prandom_u32_max(400);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -4806,7 +4806,7 @@ static u32 bond_rr_gen_slave_id(struct bonding *bond)
|
||||
|
||||
switch (packets_per_slave) {
|
||||
case 0:
|
||||
slave_id = prandom_u32();
|
||||
slave_id = get_random_u32();
|
||||
break;
|
||||
case 1:
|
||||
slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
|
||||
|
@ -3874,7 +3874,7 @@ static void bnxt_init_vnics(struct bnxt *bp)
|
||||
|
||||
if (bp->vnic_info[i].rss_hash_key) {
|
||||
if (i == 0)
|
||||
prandom_bytes(vnic->rss_hash_key,
|
||||
get_random_bytes(vnic->rss_hash_key,
|
||||
HW_HASH_KEY_SIZE);
|
||||
else
|
||||
memcpy(vnic->rss_hash_key,
|
||||
|
@ -4105,8 +4105,7 @@ static int cnic_cm_alloc_mem(struct cnic_dev *dev)
|
||||
for (i = 0; i < MAX_CM_SK_TBL_SZ; i++)
|
||||
atomic_set(&cp->csk_tbl[i].ref_count, 0);
|
||||
|
||||
port_id = prandom_u32();
|
||||
port_id %= CNIC_LOCAL_PORT_RANGE;
|
||||
port_id = prandom_u32_max(CNIC_LOCAL_PORT_RANGE);
|
||||
if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
|
||||
CNIC_LOCAL_PORT_MIN, port_id)) {
|
||||
cnic_cm_free_mem(dev);
|
||||
@ -4165,7 +4164,7 @@ static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
|
||||
{
|
||||
u32 seed;
|
||||
|
||||
seed = prandom_u32();
|
||||
seed = get_random_u32();
|
||||
cnic_ctx_wr(dev, 45, 0, seed);
|
||||
return 0;
|
||||
}
|
||||
|
@ -1063,7 +1063,7 @@ static void chtls_pass_accept_rpl(struct sk_buff *skb,
|
||||
opt2 |= WND_SCALE_EN_V(WSCALE_OK(tp));
|
||||
rpl5->opt0 = cpu_to_be64(opt0);
|
||||
rpl5->opt2 = cpu_to_be32(opt2);
|
||||
rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);
|
||||
rpl5->iss = cpu_to_be32((get_random_u32() & ~7UL) - 1);
|
||||
set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
|
||||
t4_set_arp_err_handler(skb, sk, chtls_accept_rpl_arp_failure);
|
||||
cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
|
||||
@ -1466,7 +1466,7 @@ static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt)
|
||||
tp->write_seq = snd_isn;
|
||||
tp->snd_nxt = snd_isn;
|
||||
tp->snd_una = snd_isn;
|
||||
inet_sk(sk)->inet_id = prandom_u32();
|
||||
inet_sk(sk)->inet_id = get_random_u16();
|
||||
assign_rxopt(sk, opt);
|
||||
|
||||
if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10))
|
||||
|
@ -919,8 +919,8 @@ static int csk_wait_memory(struct chtls_dev *cdev,
|
||||
current_timeo = *timeo_p;
|
||||
noblock = (*timeo_p ? false : true);
|
||||
if (csk_mem_free(cdev, sk)) {
|
||||
current_timeo = (prandom_u32() % (HZ / 5)) + 2;
|
||||
vm_wait = (prandom_u32() % (HZ / 5)) + 2;
|
||||
current_timeo = prandom_u32_max(HZ / 5) + 2;
|
||||
vm_wait = prandom_u32_max(HZ / 5) + 2;
|
||||
}
|
||||
|
||||
add_wait_queue(sk_sleep(sk), &wait);
|
||||
|
@ -129,7 +129,7 @@ static int rocker_reg_test(const struct rocker *rocker)
|
||||
u64 test_reg;
|
||||
u64 rnd;
|
||||
|
||||
rnd = prandom_u32();
|
||||
rnd = get_random_u32();
|
||||
rnd >>= 1;
|
||||
rocker_write32(rocker, TEST_REG, rnd);
|
||||
test_reg = rocker_read32(rocker, TEST_REG);
|
||||
@ -139,9 +139,9 @@ static int rocker_reg_test(const struct rocker *rocker)
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
rnd = prandom_u32();
|
||||
rnd = get_random_u32();
|
||||
rnd <<= 31;
|
||||
rnd |= prandom_u32();
|
||||
rnd |= get_random_u32();
|
||||
rocker_write64(rocker, TEST_REG64, rnd);
|
||||
test_reg = rocker_read64(rocker, TEST_REG64);
|
||||
if (test_reg != rnd * 2) {
|
||||
@ -224,7 +224,7 @@ static int rocker_dma_test_offset(const struct rocker *rocker,
|
||||
if (err)
|
||||
goto unmap;
|
||||
|
||||
prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
|
||||
get_random_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
|
||||
for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
|
||||
expect[i] = ~buf[i];
|
||||
err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
|
||||
|
@ -438,7 +438,7 @@ static int transmit(struct baycom_state *bc, int cnt, unsigned char stat)
|
||||
if ((--bc->hdlctx.slotcnt) > 0)
|
||||
return 0;
|
||||
bc->hdlctx.slotcnt = bc->ch_params.slottime;
|
||||
if ((prandom_u32() % 256) > bc->ch_params.ppersist)
|
||||
if (get_random_u8() > bc->ch_params.ppersist)
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
@ -377,7 +377,7 @@ void hdlcdrv_arbitrate(struct net_device *dev, struct hdlcdrv_state *s)
|
||||
if ((--s->hdlctx.slotcnt) > 0)
|
||||
return;
|
||||
s->hdlctx.slotcnt = s->ch_params.slottime;
|
||||
if ((prandom_u32() % 256) > s->ch_params.ppersist)
|
||||
if (get_random_u8() > s->ch_params.ppersist)
|
||||
return;
|
||||
start_tx(dev, s);
|
||||
}
|
||||
|
@ -626,7 +626,7 @@ static void yam_arbitrate(struct net_device *dev)
|
||||
yp->slotcnt = yp->slot / 10;
|
||||
|
||||
/* is random > persist ? */
|
||||
if ((prandom_u32() % 256) > yp->pers)
|
||||
if (get_random_u8() > yp->pers)
|
||||
return;
|
||||
|
||||
yam_start_tx(dev, yp);
|
||||
|
@ -1758,7 +1758,7 @@ static int qca808x_phy_fast_retrain_config(struct phy_device *phydev)
|
||||
|
||||
static int qca808x_phy_ms_random_seed_set(struct phy_device *phydev)
|
||||
{
|
||||
u16 seed_value = (prandom_u32() % QCA808X_MASTER_SLAVE_SEED_RANGE);
|
||||
u16 seed_value = prandom_u32_max(QCA808X_MASTER_SLAVE_SEED_RANGE);
|
||||
|
||||
return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
|
||||
QCA808X_MASTER_SLAVE_SEED_CFG,
|
||||
|
@ -284,7 +284,7 @@ static __init bool randomized_test(void)
|
||||
mutex_lock(&mutex);
|
||||
|
||||
for (i = 0; i < NUM_RAND_ROUTES; ++i) {
|
||||
prandom_bytes(ip, 4);
|
||||
get_random_bytes(ip, 4);
|
||||
cidr = prandom_u32_max(32) + 1;
|
||||
peer = peers[prandom_u32_max(NUM_PEERS)];
|
||||
if (wg_allowedips_insert_v4(&t, (struct in_addr *)ip, cidr,
|
||||
@ -299,7 +299,7 @@ static __init bool randomized_test(void)
|
||||
}
|
||||
for (j = 0; j < NUM_MUTATED_ROUTES; ++j) {
|
||||
memcpy(mutated, ip, 4);
|
||||
prandom_bytes(mutate_mask, 4);
|
||||
get_random_bytes(mutate_mask, 4);
|
||||
mutate_amount = prandom_u32_max(32);
|
||||
for (k = 0; k < mutate_amount / 8; ++k)
|
||||
mutate_mask[k] = 0xff;
|
||||
@ -310,7 +310,7 @@ static __init bool randomized_test(void)
|
||||
for (k = 0; k < 4; ++k)
|
||||
mutated[k] = (mutated[k] & mutate_mask[k]) |
|
||||
(~mutate_mask[k] &
|
||||
prandom_u32_max(256));
|
||||
get_random_u8());
|
||||
cidr = prandom_u32_max(32) + 1;
|
||||
peer = peers[prandom_u32_max(NUM_PEERS)];
|
||||
if (wg_allowedips_insert_v4(&t,
|
||||
@ -328,7 +328,7 @@ static __init bool randomized_test(void)
|
||||
}
|
||||
|
||||
for (i = 0; i < NUM_RAND_ROUTES; ++i) {
|
||||
prandom_bytes(ip, 16);
|
||||
get_random_bytes(ip, 16);
|
||||
cidr = prandom_u32_max(128) + 1;
|
||||
peer = peers[prandom_u32_max(NUM_PEERS)];
|
||||
if (wg_allowedips_insert_v6(&t, (struct in6_addr *)ip, cidr,
|
||||
@ -343,7 +343,7 @@ static __init bool randomized_test(void)
|
||||
}
|
||||
for (j = 0; j < NUM_MUTATED_ROUTES; ++j) {
|
||||
memcpy(mutated, ip, 16);
|
||||
prandom_bytes(mutate_mask, 16);
|
||||
get_random_bytes(mutate_mask, 16);
|
||||
mutate_amount = prandom_u32_max(128);
|
||||
for (k = 0; k < mutate_amount / 8; ++k)
|
||||
mutate_mask[k] = 0xff;
|
||||
@ -354,7 +354,7 @@ static __init bool randomized_test(void)
|
||||
for (k = 0; k < 4; ++k)
|
||||
mutated[k] = (mutated[k] & mutate_mask[k]) |
|
||||
(~mutate_mask[k] &
|
||||
prandom_u32_max(256));
|
||||
get_random_u8());
|
||||
cidr = prandom_u32_max(128) + 1;
|
||||
peer = peers[prandom_u32_max(NUM_PEERS)];
|
||||
if (wg_allowedips_insert_v6(&t,
|
||||
@ -381,13 +381,13 @@ static __init bool randomized_test(void)
|
||||
|
||||
for (j = 0;; ++j) {
|
||||
for (i = 0; i < NUM_QUERIES; ++i) {
|
||||
prandom_bytes(ip, 4);
|
||||
get_random_bytes(ip, 4);
|
||||
if (lookup(t.root4, 32, ip) != horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
|
||||
horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip);
|
||||
pr_err("allowedips random v4 self-test: FAIL\n");
|
||||
goto free;
|
||||
}
|
||||
prandom_bytes(ip, 16);
|
||||
get_random_bytes(ip, 16);
|
||||
if (lookup(t.root6, 128, ip) != horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) {
|
||||
pr_err("allowedips random v6 self-test: FAIL\n");
|
||||
goto free;
|
||||
|
@ -1128,7 +1128,7 @@ static void brcmf_p2p_afx_handler(struct work_struct *work)
|
||||
if (afx_hdl->is_listen && afx_hdl->my_listen_chan)
|
||||
/* 100ms ~ 300ms */
|
||||
err = brcmf_p2p_discover_listen(p2p, afx_hdl->my_listen_chan,
|
||||
100 * (1 + prandom_u32() % 3));
|
||||
100 * (1 + prandom_u32_max(3)));
|
||||
else
|
||||
err = brcmf_p2p_act_frm_search(p2p, afx_hdl->peer_listen_chan);
|
||||
|
||||
|
@ -177,7 +177,7 @@ static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi)
|
||||
memcpy(pfn_mac.mac, mac_addr, ETH_ALEN);
|
||||
for (i = 0; i < ETH_ALEN; i++) {
|
||||
pfn_mac.mac[i] &= mac_mask[i];
|
||||
pfn_mac.mac[i] |= get_random_int() & ~(mac_mask[i]);
|
||||
pfn_mac.mac[i] |= get_random_u8() & ~(mac_mask[i]);
|
||||
}
|
||||
/* Clear multi bit */
|
||||
pfn_mac.mac[0] &= 0xFE;
|
||||
|
@ -1099,7 +1099,7 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
|
||||
iwl_mvm_mac_ap_iterator, &data);
|
||||
|
||||
if (data.beacon_device_ts) {
|
||||
u32 rand = (prandom_u32() % (64 - 36)) + 36;
|
||||
u32 rand = prandom_u32_max(64 - 36) + 36;
|
||||
mvmvif->ap_beacon_time = data.beacon_device_ts +
|
||||
ieee80211_tu_to_usec(data.beacon_int * rand /
|
||||
100);
|
||||
|
@ -239,7 +239,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
|
||||
tx_info->pkt_len = pkt_len;
|
||||
|
||||
mwifiex_form_mgmt_frame(skb, buf, len);
|
||||
*cookie = prandom_u32() | 1;
|
||||
*cookie = get_random_u32() | 1;
|
||||
|
||||
if (ieee80211_is_action(mgmt->frame_control))
|
||||
skb = mwifiex_clone_skb_for_tx_status(priv,
|
||||
@ -303,7 +303,7 @@ mwifiex_cfg80211_remain_on_channel(struct wiphy *wiphy,
|
||||
duration);
|
||||
|
||||
if (!ret) {
|
||||
*cookie = prandom_u32() | 1;
|
||||
*cookie = get_random_u32() | 1;
|
||||
priv->roc_cfg.cookie = *cookie;
|
||||
priv->roc_cfg.chan = *chan;
|
||||
|
||||
|
@ -1161,7 +1161,7 @@ static int mgmt_tx(struct wiphy *wiphy,
|
||||
const u8 *vendor_ie;
|
||||
int ret = 0;
|
||||
|
||||
*cookie = prandom_u32();
|
||||
*cookie = get_random_u32();
|
||||
priv->tx_cookie = *cookie;
|
||||
mgmt = (const struct ieee80211_mgmt *)buf;
|
||||
|
||||
|
@ -449,7 +449,7 @@ qtnf_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
|
||||
{
|
||||
struct qtnf_vif *vif = qtnf_netdev_get_priv(wdev->netdev);
|
||||
const struct ieee80211_mgmt *mgmt_frame = (void *)params->buf;
|
||||
u32 short_cookie = prandom_u32();
|
||||
u32 short_cookie = get_random_u32();
|
||||
u16 flags = 0;
|
||||
u16 freq;
|
||||
|
||||
|
@ -1594,7 +1594,7 @@ static int cw1200_get_prio_queue(struct cw1200_common *priv,
|
||||
edca = &priv->edca.params[i];
|
||||
score = ((edca->aifns + edca->cwmin) << 16) +
|
||||
((edca->cwmax - edca->cwmin) *
|
||||
(get_random_int() & 0xFFFF));
|
||||
get_random_u16());
|
||||
if (score < best && (winner < 0 || i != 3)) {
|
||||
best = score;
|
||||
winner = i;
|
||||
|
@ -6100,7 +6100,7 @@ static int wl1271_register_hw(struct wl1271 *wl)
|
||||
wl1271_warning("Fuse mac address is zero. using random mac");
|
||||
/* Use TI oui and a random nic */
|
||||
oui_addr = WLCORE_TI_OUI_ADDRESS;
|
||||
nic_addr = get_random_int();
|
||||
nic_addr = get_random_u32();
|
||||
} else {
|
||||
oui_addr = wl->fuse_oui_addr;
|
||||
/* fuse has the BD_ADDR, the WLAN addresses are the next two */
|
||||
|
@ -23,7 +23,7 @@ u32 nvme_auth_get_seqnum(void)
|
||||
|
||||
mutex_lock(&nvme_dhchap_mutex);
|
||||
if (!nvme_dhchap_seqnum)
|
||||
nvme_dhchap_seqnum = prandom_u32();
|
||||
nvme_dhchap_seqnum = get_random_u32();
|
||||
else {
|
||||
nvme_dhchap_seqnum++;
|
||||
if (!nvme_dhchap_seqnum)
|
||||
|
@ -254,7 +254,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
|
||||
} else if (is_t5(lldi->adapter_type)) {
|
||||
struct cpl_t5_act_open_req *req =
|
||||
(struct cpl_t5_act_open_req *)skb->head;
|
||||
u32 isn = (prandom_u32() & ~7UL) - 1;
|
||||
u32 isn = (get_random_u32() & ~7UL) - 1;
|
||||
|
||||
INIT_TP_WR(req, 0);
|
||||
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
|
||||
@ -282,7 +282,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
|
||||
} else {
|
||||
struct cpl_t6_act_open_req *req =
|
||||
(struct cpl_t6_act_open_req *)skb->head;
|
||||
u32 isn = (prandom_u32() & ~7UL) - 1;
|
||||
u32 isn = (get_random_u32() & ~7UL) - 1;
|
||||
|
||||
INIT_TP_WR(req, 0);
|
||||
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
|
||||
|
@ -2233,7 +2233,7 @@ static void fcoe_ctlr_vn_restart(struct fcoe_ctlr *fip)
|
||||
|
||||
if (fip->probe_tries < FIP_VN_RLIM_COUNT) {
|
||||
fip->probe_tries++;
|
||||
wait = prandom_u32() % FIP_VN_PROBE_WAIT;
|
||||
wait = prandom_u32_max(FIP_VN_PROBE_WAIT);
|
||||
} else
|
||||
wait = FIP_VN_RLIM_INT;
|
||||
mod_timer(&fip->timer, jiffies + msecs_to_jiffies(wait));
|
||||
@ -3125,7 +3125,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
|
||||
fcoe_all_vn2vn, 0);
|
||||
fip->port_ka_time = jiffies +
|
||||
msecs_to_jiffies(FIP_VN_BEACON_INT +
|
||||
(prandom_u32() % FIP_VN_BEACON_FUZZ));
|
||||
prandom_u32_max(FIP_VN_BEACON_FUZZ));
|
||||
}
|
||||
if (time_before(fip->port_ka_time, next_time))
|
||||
next_time = fip->port_ka_time;
|
||||
|
@ -2156,8 +2156,8 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
|
||||
* This function makes an running random selection decision on FCF record to
|
||||
* use through a sequence of @fcf_cnt eligible FCF records with equal
|
||||
* probability. To perform integer manunipulation of random numbers with
|
||||
* size unit32_t, the lower 16 bits of the 32-bit random number returned
|
||||
* from prandom_u32() are taken as the random random number generated.
|
||||
* size unit32_t, a 16-bit random number returned from get_random_u16() is
|
||||
* taken as the random random number generated.
|
||||
*
|
||||
* Returns true when outcome is for the newly read FCF record should be
|
||||
* chosen; otherwise, return false when outcome is for keeping the previously
|
||||
@ -2169,7 +2169,7 @@ lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
|
||||
uint32_t rand_num;
|
||||
|
||||
/* Get 16-bit uniform random number */
|
||||
rand_num = 0xFFFF & prandom_u32();
|
||||
rand_num = get_random_u16();
|
||||
|
||||
/* Decision with probability 1/fcf_cnt */
|
||||
if ((fcf_cnt * rand_num) < 0xFFFF)
|
||||
|
@@ -618,7 +618,7 @@ static int qedi_cm_alloc_mem(struct qedi_ctx *qedi)
 			sizeof(struct qedi_endpoint *)), GFP_KERNEL);
 	if (!qedi->ep_tbl)
 		return -ENOMEM;
-	port_id = prandom_u32() % QEDI_LOCAL_PORT_RANGE;
+	port_id = prandom_u32_max(QEDI_LOCAL_PORT_RANGE);
 	if (qedi_init_id_tbl(&qedi->lcl_port_tbl, QEDI_LOCAL_PORT_RANGE,
 			     QEDI_LOCAL_PORT_MIN, port_id)) {
 		qedi_cm_free_mem(qedi);
@@ -1202,7 +1202,7 @@ cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
 			opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);
 
 		opt2 |= T5_ISS_F;
-		rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);
+		rpl5->iss = cpu_to_be32((get_random_u32() & ~7UL) - 1);
 
 		opt2 |= T5_OPT_2_VALID_F;
 
@@ -2437,7 +2437,7 @@ int tb_xdomain_init(void)
 	tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
 	tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);
 
-	xdomain_property_block_gen = prandom_u32();
+	xdomain_property_block_gen = get_random_u32();
 	return 0;
 }
 
@@ -167,7 +167,7 @@ static int uvesafb_exec(struct uvesafb_ktask *task)
 	memcpy(&m->id, &uvesafb_cn_id, sizeof(m->id));
 	m->seq = seq;
 	m->len = len;
-	m->ack = prandom_u32();
+	m->ack = get_random_u32();
 
 	/* uvesafb_task structure */
 	memcpy(m + 1, &task->t, sizeof(task->t));
@@ -362,7 +362,7 @@ static int ceph_fill_fragtree(struct inode *inode,
 	if (nsplits != ci->i_fragtree_nsplits) {
 		update = true;
 	} else if (nsplits) {
-		i = prandom_u32() % nsplits;
+		i = prandom_u32_max(nsplits);
 		id = le32_to_cpu(fragtree->splits[i].frag);
 		if (!__ceph_find_frag(ci, id))
 			update = true;
@@ -29,7 +29,7 @@ static int __mdsmap_get_random_mds(struct ceph_mdsmap *m, bool ignore_laggy)
 		return -1;
 
 	/* pick */
-	n = prandom_u32() % n;
+	n = prandom_u32_max(n);
 	for (j = 0, i = 0; i < m->possible_max_rank; i++) {
 		if (CEPH_MDS_IS_READY(i, ignore_laggy))
 			j++;
@@ -552,7 +552,7 @@ static int exfat_fill_inode(struct inode *inode, struct exfat_dir_entry *info)
 	inode->i_uid = sbi->options.fs_uid;
 	inode->i_gid = sbi->options.fs_gid;
 	inode_inc_iversion(inode);
-	inode->i_generation = prandom_u32();
+	inode->i_generation = get_random_u32();
 
 	if (info->attr & ATTR_SUBDIR) { /* directory */
 		inode->i_generation &= ~1;
@@ -277,8 +277,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent)
 	int best_ndir = inodes_per_group;
 	int best_group = -1;
 
-	group = prandom_u32();
-	parent_group = (unsigned)group % ngroups;
+	parent_group = prandom_u32_max(ngroups);
 	for (i = 0; i < ngroups; i++) {
 		group = (parent_group + i) % ngroups;
 		desc = ext2_get_group_desc (sb, group, NULL);
@@ -463,10 +463,9 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
 		hinfo.hash_version = DX_HASH_HALF_MD4;
 		hinfo.seed = sbi->s_hash_seed;
 		ext4fs_dirhash(parent, qstr->name, qstr->len, &hinfo);
-		grp = hinfo.hash;
+		parent_group = hinfo.hash % ngroups;
 	} else
-		grp = prandom_u32();
-	parent_group = (unsigned)grp % ngroups;
+		parent_group = prandom_u32_max(ngroups);
 	for (i = 0; i < ngroups; i++) {
 		g = (parent_group + i) % ngroups;
 		get_orlov_stats(sb, g, flex_size, &stats);
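Both Orlov hunks now draw the starting group directly with prandom_u32_max(ngroups) and then probe every group once, wrapping around with a modulo. A stripped-down illustration of that scan shape (hypothetical helper, not kernel code):

#include <stdint.h>
#include <stdbool.h>

/* Illustration only: start at a randomly chosen group and visit every
 * group exactly once, wrapping with a modulo, until a predicate accepts
 * one.  "start" would come from something like prandom_u32_max(ngroups). */
static int scan_from_random_start(uint32_t ngroups, uint32_t start,
				  bool (*usable)(uint32_t group))
{
	for (uint32_t i = 0; i < ngroups; i++) {
		uint32_t group = (start + i) % ngroups;

		if (usable(group))
			return (int)group;
	}
	return -1;	/* no suitable group found */
}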
@@ -1280,7 +1279,7 @@ got:
 					   EXT4_GROUP_INFO_IBITMAP_CORRUPT);
 		goto out;
 	}
-	inode->i_generation = prandom_u32();
+	inode->i_generation = get_random_u32();
 
 	/* Precompute checksum seed for inode metadata */
 	if (ext4_has_metadata_csum(sb)) {
@@ -454,8 +454,8 @@ static long swap_inode_boot_loader(struct super_block *sb,
 	inode->i_ctime = inode_bl->i_ctime = current_time(inode);
 	inode_inc_iversion(inode);
 
-	inode->i_generation = prandom_u32();
-	inode_bl->i_generation = prandom_u32();
+	inode->i_generation = get_random_u32();
+	inode_bl->i_generation = get_random_u32();
 	ext4_reset_inode_seed(inode);
 	ext4_reset_inode_seed(inode_bl);
 
@@ -265,7 +265,7 @@ static unsigned int mmp_new_seq(void)
 	u32 new_seq;
 
 	do {
-		new_seq = prandom_u32();
+		new_seq = get_random_u32();
 	} while (new_seq > EXT4_MMP_SEQ_MAX);
 
 	return new_seq;
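mmp_new_seq() keeps its retry loop, which is a rejection-sampling pattern: values above EXT4_MMP_SEQ_MAX are simply redrawn, so the accepted result stays uniform over [0, EXT4_MMP_SEQ_MAX] instead of picking up the bias a modulo would introduce. A generic sketch of the pattern (names and the callback are illustrative, not from the tree):

#include <stdint.h>

/* Sketch of the rejection-sampling idea: redraw until the value lands in
 * the accepted range, which keeps the distribution uniform at the cost of
 * occasional retries.  get_u32() stands in for whatever uniform 32-bit
 * source is available. */
static uint32_t uniform_up_to(uint32_t max, uint32_t (*get_u32)(void))
{
	uint32_t v;

	do {
		v = get_u32();
	} while (v > max);

	return v;
}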
@@ -3782,8 +3782,7 @@ cont_thread:
 			}
 			if (!progress) {
 				elr->lr_next_sched = jiffies +
-					(prandom_u32()
-					 % (EXT4_DEF_LI_MAX_START_DELAY * HZ));
+					prandom_u32_max(EXT4_DEF_LI_MAX_START_DELAY * HZ);
 			}
 			if (time_before(elr->lr_next_sched, next_wakeup))
 				next_wakeup = elr->lr_next_sched;
@@ -3930,8 +3929,8 @@ static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
 	 * spread the inode table initialization requests
 	 * better.
 	 */
-	elr->lr_next_sched = jiffies + (prandom_u32() %
-				(EXT4_DEF_LI_MAX_START_DELAY * HZ));
+	elr->lr_next_sched = jiffies + prandom_u32_max(
+				EXT4_DEF_LI_MAX_START_DELAY * HZ);
 	return elr;
 }
 
@@ -282,7 +282,7 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
 
 	/* let's select beginning hot/small space first in no_heap mode*/
 	if (f2fs_need_rand_seg(sbi))
-		p->offset = prandom_u32() % (MAIN_SECS(sbi) * sbi->segs_per_sec);
+		p->offset = prandom_u32_max(MAIN_SECS(sbi) * sbi->segs_per_sec);
 	else if (test_opt(sbi, NOHEAP) &&
 		 (type == CURSEG_HOT_DATA || IS_NODESEG(type)))
 		p->offset = 0;
@@ -50,7 +50,7 @@ static struct inode *f2fs_new_inode(struct user_namespace *mnt_userns,
 	inode->i_blocks = 0;
 	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 	F2FS_I(inode)->i_crtime = inode->i_mtime;
-	inode->i_generation = prandom_u32();
+	inode->i_generation = get_random_u32();
 
 	if (S_ISDIR(inode->i_mode))
 		F2FS_I(inode)->i_current_depth = 1;
@@ -2534,7 +2534,7 @@ static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
 
 	sanity_check_seg_type(sbi, seg_type);
 	if (f2fs_need_rand_seg(sbi))
-		return prandom_u32() % (MAIN_SECS(sbi) * sbi->segs_per_sec);
+		return prandom_u32_max(MAIN_SECS(sbi) * sbi->segs_per_sec);
 
 	/* if segs_per_sec is large than 1, we need to keep original policy. */
 	if (__is_large_section(sbi))
@@ -2588,7 +2588,7 @@ static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
 	curseg->alloc_type = LFS;
 	if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
 		curseg->fragment_remained_chunk =
-				prandom_u32() % sbi->max_fragment_chunk + 1;
+				prandom_u32_max(sbi->max_fragment_chunk) + 1;
 }
 
 static int __next_free_blkoff(struct f2fs_sb_info *sbi,
@@ -2625,9 +2625,9 @@ static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
 		/* To allocate block chunks in different sizes, use random number */
 		if (--seg->fragment_remained_chunk <= 0) {
 			seg->fragment_remained_chunk =
-				prandom_u32() % sbi->max_fragment_chunk + 1;
+				prandom_u32_max(sbi->max_fragment_chunk) + 1;
 			seg->next_blkoff +=
-				prandom_u32() % sbi->max_fragment_hole + 1;
+				prandom_u32_max(sbi->max_fragment_hole) + 1;
 		}
 	}
 }
@@ -523,7 +523,7 @@ int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
 	inode->i_uid = sbi->options.fs_uid;
 	inode->i_gid = sbi->options.fs_gid;
 	inode_inc_iversion(inode);
-	inode->i_generation = prandom_u32();
+	inode->i_generation = get_random_u32();
 
 	if ((de->attr & ATTR_DIR) && !IS_FREE(de->name)) {
 		inode->i_generation &= ~1;
@@ -4375,8 +4375,8 @@ nfsd4_init_leases_net(struct nfsd_net *nn)
 	nn->nfsd4_grace = 90;
 	nn->somebody_reclaimed = false;
 	nn->track_reclaim_completes = false;
-	nn->clverifier_counter = prandom_u32();
-	nn->clientid_base = prandom_u32();
+	nn->clverifier_counter = get_random_u32();
+	nn->clientid_base = get_random_u32();
 	nn->clientid_counter = nn->clientid_base + 1;
 	nn->s2s_cp_cl_id = nn->clientid_counter++;
 
@@ -3819,7 +3819,7 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
 	}
 
 	log_init_pg_hdr(log, page_size, page_size, 1, 1);
-	log_create(log, l_size, 0, get_random_int(), false, false);
+	log_create(log, l_size, 0, get_random_u32(), false, false);
 
 	log->ra = ra;
 
@@ -3893,7 +3893,7 @@ check_restart_area:
 
 	/* Do some checks based on whether we have a valid log page. */
 	if (!rst_info.valid_page) {
-		open_log_count = get_random_int();
+		open_log_count = get_random_u32();
 		goto init_log_instance;
 	}
 	open_log_count = le32_to_cpu(ra2->open_log_count);
@@ -4044,7 +4044,7 @@ find_oldest:
 	memcpy(ra->clients, Add2Ptr(ra2, t16),
 	       le16_to_cpu(ra2->ra_len) - t16);
 
-	log->current_openlog_count = get_random_int();
+	log->current_openlog_count = get_random_u32();
 	ra->open_log_count = cpu_to_le32(log->current_openlog_count);
 	log->ra_size = offsetof(struct RESTART_AREA, clients) +
 		       sizeof(struct CLIENT_REC);
@@ -2467,7 +2467,7 @@ error_dump:
 
 static inline int chance(unsigned int n, unsigned int out_of)
 {
-	return !!((prandom_u32() % out_of) + 1 <= n);
+	return !!(prandom_u32_max(out_of) + 1 <= n);
 
 }
 
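After the conversion, chance(n, out_of) still returns nonzero with probability n/out_of: prandom_u32_max(out_of) is uniform over [0, out_of), so the "+ 1 <= n" test selects exactly n of the out_of equally likely outcomes. A tiny user-space enumeration of the chance(1, 2) case used in the next hunk (illustrative only):

#include <stdio.h>

/* Enumerate the outcomes of the rewritten chance(n, out_of): with a value
 * uniform over [0, out_of), "+ 1 <= n" is true for exactly n of the
 * out_of possibilities. */
int main(void)
{
	unsigned int n = 1, out_of = 2;	/* the chance(1, 2) case below */
	unsigned int hits = 0;

	for (unsigned int r = 0; r < out_of; r++)
		if (r + 1 <= n)
			hits++;

	printf("chance(%u, %u) fires %u time(s) out of %u\n",
	       n, out_of, hits, out_of);
	return 0;
}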
@@ -2485,13 +2485,13 @@ static int power_cut_emulated(struct ubifs_info *c, int lnum, int write)
 		if (chance(1, 2)) {
 			d->pc_delay = 1;
 			/* Fail within 1 minute */
-			delay = prandom_u32() % 60000;
+			delay = prandom_u32_max(60000);
 			d->pc_timeout = jiffies;
 			d->pc_timeout += msecs_to_jiffies(delay);
 			ubifs_warn(c, "failing after %lums", delay);
 		} else {
 			d->pc_delay = 2;
-			delay = prandom_u32() % 10000;
+			delay = prandom_u32_max(10000);
 			/* Fail within 10000 operations */
 			d->pc_cnt_max = delay;
 			ubifs_warn(c, "failing after %lu calls", delay);
@@ -2571,7 +2571,7 @@ static int corrupt_data(const struct ubifs_info *c, const void *buf,
 	unsigned int from, to, ffs = chance(1, 2);
 	unsigned char *p = (void *)buf;
 
-	from = prandom_u32() % len;
+	from = prandom_u32_max(len);
 	/* Corruption span max to end of write unit */
 	to = min(len, ALIGN(from + 1, c->max_write_size));
 
@@ -2581,7 +2581,7 @@ static int corrupt_data(const struct ubifs_info *c, const void *buf,
 	if (ffs)
 		memset(p + from, 0xFF, to - from);
 	else
-		prandom_bytes(p + from, to - from);
+		get_random_bytes(p + from, to - from);
 
 	return to;
 }
@@ -503,7 +503,7 @@ static void mark_inode_clean(struct ubifs_info *c, struct ubifs_inode *ui)
 static void set_dent_cookie(struct ubifs_info *c, struct ubifs_dent_node *dent)
 {
 	if (c->double_hash)
-		dent->cookie = (__force __le32) prandom_u32();
+		dent->cookie = (__force __le32) get_random_u32();
 	else
 		dent->cookie = 0;
 }