diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index f3aa4e65b15e..2235432c59d1 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -472,7 +472,8 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
 	struct nf_conn *ct;
 
 	if (unlikely(!nf_conntrack_hash_rnd_initted)) {
-		get_random_bytes(&nf_conntrack_hash_rnd, 4);
+		get_random_bytes(&nf_conntrack_hash_rnd,
+				 sizeof(nf_conntrack_hash_rnd));
 		nf_conntrack_hash_rnd_initted = 1;
 	}
 
@@ -1103,7 +1104,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
 
 	/* We have to rehahs for the new table anyway, so we also can
 	 * use a newrandom seed */
-	get_random_bytes(&rnd, 4);
+	get_random_bytes(&rnd, sizeof(rnd));
 
 	/* Lookups in the old hash might happen in parallel, which means we
 	 * might get false negatives during connection lookup. New connections
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 3a8a34a6d37c..357ba39d4c8d 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -72,7 +72,8 @@ static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple
 	unsigned int hash;
 
 	if (unlikely(!nf_ct_expect_hash_rnd_initted)) {
-		get_random_bytes(&nf_ct_expect_hash_rnd, 4);
+		get_random_bytes(&nf_ct_expect_hash_rnd,
+				 sizeof(nf_ct_expect_hash_rnd));
 		nf_ct_expect_hash_rnd_initted = 1;
 	}
 
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index f97fded024c4..2482055e1c56 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -149,7 +149,7 @@ dsthash_alloc_init(struct xt_hashlimit_htable *ht,
 	/* initialize hash with random val at the time we allocate
 	 * the first hashtable entry */
 	if (!ht->rnd_initialized) {
-		get_random_bytes(&ht->rnd, 4);
+		get_random_bytes(&ht->rnd, sizeof(ht->rnd));
 		ht->rnd_initialized = 1;
 	}
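
All four hunks make the same change: the length passed to get_random_bytes() becomes
sizeof(<seed variable>) instead of a hard-coded 4, so the buffer size stays tied to
the variable's type. The sketch below illustrates the point in plain userspace C,
since get_random_bytes() is kernel-internal; getentropy(3) stands in for it, and the
program and its names (hash_rnd, hash_rnd_initted, init_hash_rnd) are hypothetical,
merely mirroring the lazy-init pattern in the patched files.

/* Lazy one-time seeding of a hash-table salt, userspace sketch. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>

static uint32_t hash_rnd;
static int hash_rnd_initted;

static void init_hash_rnd(void)
{
	if (!hash_rnd_initted) {
		/* sizeof(hash_rnd) tracks the variable's type: if hash_rnd
		 * were later widened to uint64_t, every byte would still be
		 * filled, whereas a hard-coded 4 would silently leave the
		 * upper half zero and weaken the seed. */
		if (getentropy(&hash_rnd, sizeof(hash_rnd)) != 0) {
			perror("getentropy");
			exit(EXIT_FAILURE);
		}
		hash_rnd_initted = 1;
	}
}

int main(void)
{
	init_hash_rnd();
	printf("hash seed: 0x%08x\n", hash_rnd);
	return 0;
}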