mirror of
https://github.com/torvalds/linux.git
synced 2024-11-22 20:22:09 +00:00
net: Replace get_cpu_var through this_cpu_ptr
Replace uses of get_cpu_var for address calculation with this_cpu_ptr.

Cc: netdev@vger.kernel.org
Cc: Eric Dumazet <edumazet@google.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
This commit is contained in:
parent
f7f66b05aa
commit
903ceff7ca
@ -242,7 +242,7 @@ extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct,
|
||||
DECLARE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
|
||||
static inline struct nf_conn *nf_ct_untracked_get(void)
|
||||
{
|
||||
return &__raw_get_cpu_var(nf_conntrack_untracked);
|
||||
return raw_cpu_ptr(&nf_conntrack_untracked);
|
||||
}
|
||||
void nf_ct_untracked_status_or(unsigned long bits);
|
||||
|
||||
|
@ -168,7 +168,7 @@ struct linux_xfrm_mib {
|
||||
|
||||
#define SNMP_ADD_STATS64_BH(mib, field, addend) \
|
||||
do { \
|
||||
__typeof__(*mib) *ptr = __this_cpu_ptr(mib); \
|
||||
__typeof__(*mib) *ptr = raw_cpu_ptr(mib); \
|
||||
u64_stats_update_begin(&ptr->syncp); \
|
||||
ptr->mibs[field] += addend; \
|
||||
u64_stats_update_end(&ptr->syncp); \
|
||||
@ -190,7 +190,7 @@ struct linux_xfrm_mib {
|
||||
#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) \
|
||||
do { \
|
||||
__typeof__(*mib) *ptr; \
|
||||
ptr = __this_cpu_ptr(mib); \
|
||||
ptr = raw_cpu_ptr((mib)); \
|
||||
u64_stats_update_begin(&ptr->syncp); \
|
||||
ptr->mibs[basefield##PKTS]++; \
|
||||
ptr->mibs[basefield##OCTETS] += addend; \
|
||||
|
@ -2153,7 +2153,7 @@ static inline void __netif_reschedule(struct Qdisc *q)
|
||||
unsigned long flags;
|
||||
|
||||
local_irq_save(flags);
|
||||
sd = &__get_cpu_var(softnet_data);
|
||||
sd = this_cpu_ptr(&softnet_data);
|
||||
q->next_sched = NULL;
|
||||
*sd->output_queue_tailp = q;
|
||||
sd->output_queue_tailp = &q->next_sched;
|
||||
@ -3195,7 +3195,7 @@ static void rps_trigger_softirq(void *data)
|
||||
static int rps_ipi_queued(struct softnet_data *sd)
|
||||
{
|
||||
#ifdef CONFIG_RPS
|
||||
struct softnet_data *mysd = &__get_cpu_var(softnet_data);
|
||||
struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
|
||||
|
||||
if (sd != mysd) {
|
||||
sd->rps_ipi_next = mysd->rps_ipi_list;
|
||||
@ -3222,7 +3222,7 @@ static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
|
||||
if (qlen < (netdev_max_backlog >> 1))
|
||||
return false;
|
||||
|
||||
sd = &__get_cpu_var(softnet_data);
|
||||
sd = this_cpu_ptr(&softnet_data);
|
||||
|
||||
rcu_read_lock();
|
||||
fl = rcu_dereference(sd->flow_limit);
|
||||
@ -3369,7 +3369,7 @@ EXPORT_SYMBOL(netif_rx_ni);
|
||||
|
||||
static void net_tx_action(struct softirq_action *h)
|
||||
{
|
||||
struct softnet_data *sd = &__get_cpu_var(softnet_data);
|
||||
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
|
||||
|
||||
if (sd->completion_queue) {
|
||||
struct sk_buff *clist;
|
||||
@ -3794,7 +3794,7 @@ EXPORT_SYMBOL(netif_receive_skb);
|
||||
static void flush_backlog(void *arg)
|
||||
{
|
||||
struct net_device *dev = arg;
|
||||
struct softnet_data *sd = &__get_cpu_var(softnet_data);
|
||||
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
|
||||
struct sk_buff *skb, *tmp;
|
||||
|
||||
rps_lock(sd);
|
||||
@ -4301,7 +4301,7 @@ void __napi_schedule(struct napi_struct *n)
|
||||
unsigned long flags;
|
||||
|
||||
local_irq_save(flags);
|
||||
____napi_schedule(&__get_cpu_var(softnet_data), n);
|
||||
____napi_schedule(this_cpu_ptr(&softnet_data), n);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
EXPORT_SYMBOL(__napi_schedule);
|
||||
@ -4422,7 +4422,7 @@ EXPORT_SYMBOL(netif_napi_del);
|
||||
|
||||
static void net_rx_action(struct softirq_action *h)
|
||||
{
|
||||
struct softnet_data *sd = &__get_cpu_var(softnet_data);
|
||||
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
|
||||
unsigned long time_limit = jiffies + 2;
|
||||
int budget = netdev_budget;
|
||||
void *have;
|
||||
|
@ -146,7 +146,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
|
||||
unsigned long flags;
|
||||
|
||||
local_irq_save(flags);
|
||||
data = &__get_cpu_var(dm_cpu_data);
|
||||
data = this_cpu_ptr(&dm_cpu_data);
|
||||
spin_lock(&data->lock);
|
||||
dskb = data->skb;
|
||||
|
||||
|
@ -345,7 +345,7 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
|
||||
unsigned long flags;
|
||||
|
||||
local_irq_save(flags);
|
||||
nc = &__get_cpu_var(netdev_alloc_cache);
|
||||
nc = this_cpu_ptr(&netdev_alloc_cache);
|
||||
if (unlikely(!nc->frag.page)) {
|
||||
refill:
|
||||
for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
|
||||
|
@ -1311,7 +1311,7 @@ static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
|
||||
if (rt_is_input_route(rt)) {
|
||||
p = (struct rtable **)&nh->nh_rth_input;
|
||||
} else {
|
||||
p = (struct rtable **)__this_cpu_ptr(nh->nh_pcpu_rth_output);
|
||||
p = (struct rtable **)raw_cpu_ptr(nh->nh_pcpu_rth_output);
|
||||
}
|
||||
orig = *p;
|
||||
|
||||
@ -1939,7 +1939,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
|
||||
do_cache = false;
|
||||
goto add;
|
||||
}
|
||||
prth = __this_cpu_ptr(nh->nh_pcpu_rth_output);
|
||||
prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
|
||||
}
|
||||
rth = rcu_dereference(*prth);
|
||||
if (rt_cache_valid(rth)) {
|
||||
|
@ -40,7 +40,7 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
|
||||
|
||||
net_get_random_once(syncookie_secret, sizeof(syncookie_secret));
|
||||
|
||||
tmp = __get_cpu_var(ipv4_cookie_scratch);
|
||||
tmp = this_cpu_ptr(ipv4_cookie_scratch);
|
||||
memcpy(tmp + 4, syncookie_secret[c], sizeof(syncookie_secret[c]));
|
||||
tmp[0] = (__force u32)saddr;
|
||||
tmp[1] = (__force u32)daddr;
|
||||
|
@ -3058,7 +3058,7 @@ struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
|
||||
local_bh_disable();
|
||||
p = ACCESS_ONCE(tcp_md5sig_pool);
|
||||
if (p)
|
||||
return __this_cpu_ptr(p);
|
||||
return raw_cpu_ptr(p);
|
||||
|
||||
local_bh_enable();
|
||||
return NULL;
|
||||
|
@ -842,7 +842,7 @@ void tcp_wfree(struct sk_buff *skb)
|
||||
|
||||
/* queue this socket to tasklet queue */
|
||||
local_irq_save(flags);
|
||||
tsq = &__get_cpu_var(tsq_tasklet);
|
||||
tsq = this_cpu_ptr(&tsq_tasklet);
|
||||
list_add(&tp->tsq_node, &tsq->head);
|
||||
tasklet_schedule(&tsq->tasklet);
|
||||
local_irq_restore(flags);
|
||||
|
@ -67,7 +67,7 @@ static u32 cookie_hash(const struct in6_addr *saddr, const struct in6_addr *dadd
|
||||
|
||||
net_get_random_once(syncookie6_secret, sizeof(syncookie6_secret));
|
||||
|
||||
tmp = __get_cpu_var(ipv6_cookie_scratch);
|
||||
tmp = this_cpu_ptr(ipv6_cookie_scratch);
|
||||
|
||||
/*
|
||||
* we have 320 bits of information to hash, copy in the remaining
|
||||
|
@ -267,7 +267,7 @@ static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
|
||||
unsigned long *flag;
|
||||
|
||||
preempt_disable();
|
||||
flag = &__get_cpu_var(clean_list_grace);
|
||||
flag = this_cpu_ptr(&clean_list_grace);
|
||||
set_bit(CLEAN_LIST_BUSY_BIT, flag);
|
||||
ret = llist_del_first(&pool->clean_list);
|
||||
if (ret)
|
||||
|
Loading…
Reference in New Issue
Block a user