x86, UV: Remove BAU check for stay-busy

Remove the faulty assumption that a long-running BAU request has
encountered a hardware problem and will never finish.

Numalink congestion can make a request appear to have
encountered such a problem, but it is not safe to cancel the
request.  If such a cancel is done but a reply is later received,
we can miss a TLB shootdown.
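One way to picture the hazard (an assumption about the failure mode, not
spelled out in the message): completion is signaled per descriptor slot,
not per request, so a slot that is force-idled and then reused can be
"completed" by the stale reply meant for its previous occupant.  A toy
interleaving in plain C; the names are invented for illustration and are
not the BAU interface:

#include <stdio.h>

enum status { IDLE, BUSY };

struct slot { enum status s; int req; };	/* one descriptor slot */

int main(void)
{
	struct slot d = { BUSY, 1 };	/* request 1 looks hung (congestion) */

	d.s = IDLE;			/* the cancel: slot forced idle */
	d = (struct slot){ BUSY, 2 };	/* slot reused for request 2 */
	d.s = IDLE;			/* late reply for request 1 arrives */

	/* request 2 reads IDLE and returns early: a missed TLB shootdown */
	printf("request %d treated as complete\n", d.req);
	return 0;
}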

We depend upon the max_bau_concurrent 'throttle' to prevent the
stay-busy case from happening.
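The throttle can be pictured as a per-hub counting gate on in-flight
descriptors.  A minimal sketch of the idea: max_bau_concurrent is the
real tunable the message cites, but the struct and function names below
are invented for illustration, not the kernel code path:

#include <stdatomic.h>
#include <stdbool.h>

struct hub_throttle {
	atomic_int inflight;		/* descriptors currently active */
	int max_bau_concurrent;		/* the tunable named above */
};

/* Try to reserve a send slot; the caller backs off when the hub is full. */
static bool throttle_enter(struct hub_throttle *t)
{
	int cur = atomic_load(&t->inflight);

	while (cur < t->max_bau_concurrent)
		if (atomic_compare_exchange_weak(&t->inflight, &cur, cur + 1))
			return true;	/* slot reserved */
	return false;			/* at the concurrency cap */
}

static void throttle_exit(struct hub_throttle *t)
{
	atomic_fetch_sub(&t->inflight, 1);
}

The idea is that a bounded number of in-flight requests keeps any one
descriptor from sitting behind an unbounded backlog and appearing
stay-busy indefinitely.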

Signed-off-by: Cliff Wickman <cpw@sgi.com>
Cc: gregkh@suse.de
LKML-Reference: <E1OJvNy-0004ad-BV@eag09.americas.sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 90cc7d9449
parent a8328ee58c
Author:    Cliff Wickman <cpw@sgi.com>  2010-06-02 16:22:02 -05:00
Committer: Ingo Molnar

2 changed files, 0 insertions(+), 24 deletions(-):
 arch/x86/include/asm/uv/uv_bau.h
 arch/x86/kernel/tlb_uv.c

--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -402,7 +402,6 @@ struct bau_control {
 	unsigned short uvhub_quiesce;
 	short socket_acknowledge_count[DEST_Q_SIZE];
 	cycles_t send_message;
-	spinlock_t masks_lock;
 	spinlock_t uvhub_lock;
 	spinlock_t queue_lock;
 	/* tunables */

--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -405,12 +405,10 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
 	unsigned long mmr;
 	unsigned long mask;
 	cycles_t ttime;
-	cycles_t timeout_time;
 	struct ptc_stats *stat = bcp->statp;
 	struct bau_control *hmaster;

 	hmaster = bcp->uvhub_master;
-	timeout_time = get_cycles() + bcp->timeout_interval;

 	/* spin on the status MMR, waiting for it to go idle */
 	while ((descriptor_status = (((unsigned long)
@@ -450,26 +448,6 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
 			 * descriptor_status is still BUSY
 			 */
			cpu_relax();
-			relaxes++;
-			if (relaxes >= 10000) {
-				relaxes = 0;
-				if (get_cycles() > timeout_time) {
-					quiesce_local_uvhub(hmaster);
-
-					/* single-thread the register change */
-					spin_lock(&hmaster->masks_lock);
-					mmr = uv_read_local_mmr(mmr_offset);
-					mask = 0UL;
-					mask |= (3UL < right_shift);
-					mask = ~mask;
-					mmr &= mask;
-					uv_write_local_mmr(mmr_offset, mmr);
-					spin_unlock(&hmaster->masks_lock);
-					end_uvhub_quiesce(hmaster);
-
-					stat->s_busy++;
-					return FLUSH_GIVEUP;
-				}
-			}
 		}
 	}
 	bcp->conseccompletes++;
@@ -1580,7 +1558,6 @@ static void uv_init_per_cpu(int nuvhubs)
 	for_each_present_cpu(cpu) {
 		bcp = &per_cpu(bau_control, cpu);
 		memset(bcp, 0, sizeof(struct bau_control));
-		spin_lock_init(&bcp->masks_lock);
 		pnode = uv_cpu_hub_info(cpu)->pnode;
 		uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
 		uvhub_mask |= (1 << uvhub);