mlx4: do not use rwlock in fast path
Using a reader-writer lock in the fast path is silly when we can instead use RCU or a seqlock. For the mlx4 hwstamp clock, a seqlock is the way to go, removing two atomic operations and false sharing.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Tariq Toukan <tariqt@mellanox.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent d5bc1613d0
commit 99f5711e7c
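For readers unfamiliar with the primitive being swapped in, here is a minimal userspace sketch of the seqlock idea, using C11 atomics. It is an illustration only, not the kernel's seqlock_t implementation, and every name in it (struct demo_clock, demo_read_begin(), demo_write(), ...) is invented for this example; the memory ordering is also simplified compared to what write_seqlock()/read_seqbegin()/read_seqretry() do internally. The property the changelog relies on shows up on the read side: a reader only loads the sequence counter before and after sampling the data and retries if a writer slipped in, so the per-packet hwtstamp path no longer performs the atomic read-modify-write operations of read_lock_irqsave()/read_unlock_irqrestore() or dirties the lock's cacheline. Writers (frequency adjustment, the overflow watchdog, clock set) run rarely, so making them exclusive with write_seqlock_irqsave() costs nothing on the hot path.

/*
 * Minimal userspace sketch of the seqlock pattern the patch adopts.
 * NOT the kernel's seqlock_t from <linux/seqlock.h>; the type and
 * function names are invented for illustration, and the barriers are
 * simplified.  In real concurrent use the payload fields would also need
 * READ_ONCE/WRITE_ONCE-style accesses; the demo in main() is
 * single-threaded and only shows the shape of the read/write sides.
 */
#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct demo_clock {
        atomic_uint seq;   /* even: data stable, odd: writer in progress */
        uint64_t cycles;   /* payload protected by the sequence counter  */
        uint64_t nsec;
};

/* Reader entry: wait out any in-flight writer, return the sequence seen. */
static unsigned int demo_read_begin(const struct demo_clock *c)
{
        unsigned int s;

        while ((s = atomic_load_explicit(&c->seq, memory_order_acquire)) & 1)
                ;                       /* writer active, spin */
        return s;
}

/* Reader exit: non-zero means a writer ran, so the caller must retry. */
static int demo_read_retry(const struct demo_clock *c, unsigned int s)
{
        atomic_thread_fence(memory_order_acquire);
        return atomic_load_explicit(&c->seq, memory_order_relaxed) != s;
}

/* Writer: make the sequence odd, update the payload, make it even again. */
static void demo_write(struct demo_clock *c, uint64_t cycles, uint64_t nsec)
{
        atomic_fetch_add_explicit(&c->seq, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);
        c->cycles = cycles;
        c->nsec = nsec;
        atomic_fetch_add_explicit(&c->seq, 1, memory_order_release);
}

int main(void)
{
        struct demo_clock clk = { .seq = 0 };
        unsigned int s;
        uint64_t nsec;

        demo_write(&clk, 1000, 123456789);

        /* Same shape as the converted mlx4_en_fill_hwtstamps() read side:
         * plain loads plus a retry loop, no atomic read-modify-write and
         * no shared cacheline dirtied by the reader.
         */
        do {
                s = demo_read_begin(&clk);
                nsec = clk.nsec;
        } while (demo_read_retry(&clk, s));

        printf("sampled nsec = %" PRIu64 "\n", nsec);
        return 0;
}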
drivers/net/ethernet/mellanox/mlx4/en_clock.c

@@ -62,12 +62,13 @@ void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
                             struct skb_shared_hwtstamps *hwts,
                             u64 timestamp)
 {
-        unsigned long flags;
+        unsigned int seq;
         u64 nsec;
 
-        read_lock_irqsave(&mdev->clock_lock, flags);
-        nsec = timecounter_cyc2time(&mdev->clock, timestamp);
-        read_unlock_irqrestore(&mdev->clock_lock, flags);
+        do {
+                seq = read_seqbegin(&mdev->clock_lock);
+                nsec = timecounter_cyc2time(&mdev->clock, timestamp);
+        } while (read_seqretry(&mdev->clock_lock, seq));
 
         memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
         hwts->hwtstamp = ns_to_ktime(nsec);
@@ -95,9 +96,9 @@ void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
         unsigned long flags;
 
         if (timeout) {
-                write_lock_irqsave(&mdev->clock_lock, flags);
+                write_seqlock_irqsave(&mdev->clock_lock, flags);
                 timecounter_read(&mdev->clock);
-                write_unlock_irqrestore(&mdev->clock_lock, flags);
+                write_sequnlock_irqrestore(&mdev->clock_lock, flags);
                 mdev->last_overflow_check = jiffies;
         }
 }
@@ -128,10 +129,10 @@ static int mlx4_en_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
         adj *= delta;
         diff = div_u64(adj, 1000000000ULL);
 
-        write_lock_irqsave(&mdev->clock_lock, flags);
+        write_seqlock_irqsave(&mdev->clock_lock, flags);
         timecounter_read(&mdev->clock);
         mdev->cycles.mult = neg_adj ? mult - diff : mult + diff;
-        write_unlock_irqrestore(&mdev->clock_lock, flags);
+        write_sequnlock_irqrestore(&mdev->clock_lock, flags);
 
         return 0;
 }
@@ -149,9 +150,9 @@ static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
                                                 ptp_clock_info);
         unsigned long flags;
 
-        write_lock_irqsave(&mdev->clock_lock, flags);
+        write_seqlock_irqsave(&mdev->clock_lock, flags);
         timecounter_adjtime(&mdev->clock, delta);
-        write_unlock_irqrestore(&mdev->clock_lock, flags);
+        write_sequnlock_irqrestore(&mdev->clock_lock, flags);
 
         return 0;
 }
@@ -172,9 +173,9 @@ static int mlx4_en_phc_gettime(struct ptp_clock_info *ptp,
         unsigned long flags;
         u64 ns;
 
-        write_lock_irqsave(&mdev->clock_lock, flags);
+        write_seqlock_irqsave(&mdev->clock_lock, flags);
         ns = timecounter_read(&mdev->clock);
-        write_unlock_irqrestore(&mdev->clock_lock, flags);
+        write_sequnlock_irqrestore(&mdev->clock_lock, flags);
 
         *ts = ns_to_timespec64(ns);
 
@@ -198,9 +199,9 @@ static int mlx4_en_phc_settime(struct ptp_clock_info *ptp,
         unsigned long flags;
 
         /* reset the timecounter */
-        write_lock_irqsave(&mdev->clock_lock, flags);
+        write_seqlock_irqsave(&mdev->clock_lock, flags);
         timecounter_init(&mdev->clock, &mdev->cycles, ns);
-        write_unlock_irqrestore(&mdev->clock_lock, flags);
+        write_sequnlock_irqrestore(&mdev->clock_lock, flags);
 
         return 0;
 }
@@ -266,7 +267,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
         if (mdev->ptp_clock)
                 return;
 
-        rwlock_init(&mdev->clock_lock);
+        seqlock_init(&mdev->clock_lock);
 
         memset(&mdev->cycles, 0, sizeof(mdev->cycles));
         mdev->cycles.read = mlx4_en_read_clock;
@@ -276,10 +277,10 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
                 clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
         mdev->nominal_c_mult = mdev->cycles.mult;
 
-        write_lock_irqsave(&mdev->clock_lock, flags);
+        write_seqlock_irqsave(&mdev->clock_lock, flags);
         timecounter_init(&mdev->clock, &mdev->cycles,
                          ktime_to_ns(ktime_get_real()));
-        write_unlock_irqrestore(&mdev->clock_lock, flags);
+        write_sequnlock_irqrestore(&mdev->clock_lock, flags);
 
         /* Calculate period in seconds to call the overflow watchdog - to make
          * sure counter is checked at least once every wrap around.
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h

@@ -424,9 +424,9 @@ struct mlx4_en_dev {
         u32                     priv_pdn;
         spinlock_t              uar_lock;
         u8                      mac_removed[MLX4_MAX_PORTS + 1];
-        rwlock_t                clock_lock;
         u32                     nominal_c_mult;
         struct cyclecounter     cycles;
+        seqlock_t               clock_lock;
         struct timecounter      clock;
         unsigned long           last_overflow_check;
         unsigned long           overflow_period;