Mirror of https://github.com/torvalds/linux.git, synced 2024-12-02 00:51:44 +00:00
Commit cf8e865810
The Itanium architecture is obsolete, and an informal survey [0] reveals that any residual use of Itanium hardware in production is mostly HP-UX or OpenVMS based. The use of Linux on Itanium appears to be limited to enthusiasts who occasionally boot a fresh Linux kernel to see whether things are still working as intended, and perhaps to churn out some distro packages that are rarely used in practice.

None of the original companies behind Itanium still produce or support any hardware or software for the architecture, and it is listed as 'Orphaned' in the MAINTAINERS file, as apparently none of the engineers who contributed on behalf of those companies (nor anyone else, for that matter) have been willing to support or maintain the architecture upstream, or even be responsible for applying the odd fix. The Intel firmware team removed all IA-64 support from the Tianocore/EDK2 reference implementation of EFI in 2018. (Itanium is the original architecture for which EFI was developed, and the way Linux supports it deviates significantly from other architectures.) Some distros, such as Debian and Gentoo, still maintain [unofficial] ia64 ports, but many dropped support years ago.

While the argument is being made [1] that there is a 'for the common good' angle to being able to build and run existing projects such as the Grid Community Toolkit [2] on Itanium for interoperability testing, the fact remains that none of those projects are known to be deployed on Linux/ia64, and very few people actually have access to such a system in the first place. Even if there were imaginable ways in which Linux/ia64 could be put to good use today, what matters is whether anyone is actually doing that, and this does not appear to be the case.

No emulators are widely available, so boot testing Itanium is generally infeasible for ordinary contributors. GCC still supports IA-64, but its compile farm [3] no longer has any IA-64 machines. GLIBC would like to get rid of IA-64 too [4], as that would permit some overdue code cleanups. In summary, the benefits to the ecosystem of keeping IA-64 are mostly theoretical, whereas the maintenance overhead of keeping it supported is real.

So let's rip off the band-aid, and remove the IA-64 arch code entirely. This follows the timeline proposed by the Debian/ia64 maintainer [5], which removes support in a controlled manner, leaving IA-64 in a known-good state in the most recent LTS release. Other projects will follow once the kernel support is removed.

[0] https://lore.kernel.org/all/CAMj1kXFCMh_578jniKpUtx_j8ByHnt=s7S+yQ+vGbKt9ud7+kQ@mail.gmail.com/
[1] https://lore.kernel.org/all/0075883c-7c51-00f5-2c2d-5119c1820410@web.de/
[2] https://gridcf.org/gct-docs/latest/index.html
[3] https://cfarm.tetaneutral.net/machines/list/
[4] https://lore.kernel.org/all/87bkiilpc4.fsf@mid.deneb.enyo.de/
[5] https://lore.kernel.org/all/ff58a3e76e5102c94bb5946d99187b358def688a.camel@physik.fu-berlin.de/

Acked-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
193 lines · 4.5 KiB · C
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * GRU KERNEL MCS INSTRUCTIONS
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 */

#include <linux/kernel.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"

/* 10 sec operation timeout, expressed in TSC cycles */
#include <linux/sync_core.h>
#include <asm/tsc.h>
#define GRU_OPERATION_TIMEOUT	((cycles_t) tsc_khz*10*1000)
#define CLKS2NSEC(c)		((c) * 1000000 / tsc_khz)

/* Extract the status field from a kernel handle */
#define GET_MSEG_HANDLE_STATUS(h)	(((*(unsigned long *)(h)) >> 16) & 3)

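/* Per-opcode statistics for kernel MCS operations: count, total time and max time (nsec) */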
struct mcs_op_statistic mcs_op_statistics[mcsop_last];

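/* Convert the elapsed cycle count to nsec and fold it into the stats for this opcode */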
static void update_mcs_stats(enum mcs_op op, unsigned long clks)
{
	unsigned long nsec;

	nsec = CLKS2NSEC(clks);
	atomic_long_inc(&mcs_op_statistics[op].count);
	atomic_long_add(nsec, &mcs_op_statistics[op].total);
	if (mcs_op_statistics[op].max < nsec)
		mcs_op_statistics[op].max = nsec;
}

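/*
 * Start an operation on a kernel handle: set the CMD/STATUS bits in the
 * first word, then flush the handle out of the cache so the GRU sees it.
 */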
static void start_instruction(void *h)
{
	unsigned long *w0 = h;

	wmb();		/* setting CMD/STATUS bits must be last */
	*w0 = *w0 | 0x20001;
	gru_flush_cache(h);
}

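/*
 * An operation on a handle failed to complete in time: identify the handle
 * type (CCH, TGH or TFH) from its offset within the GSEG and panic.
 */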
static void report_instruction_timeout(void *h)
{
	unsigned long goff = GSEGPOFF((unsigned long)h);
	char *id = "???";

	if (TYPE_IS(CCH, goff))
		id = "CCH";
	else if (TYPE_IS(TGH, goff))
		id = "TGH";
	else if (TYPE_IS(TFH, goff))
		id = "TFH";

	panic(KERN_ALERT "GRU %p (%s) is malfunctioning\n", h, id);
}

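/*
 * Spin until the handle status is no longer ACTIVE, panicking if the
 * operation exceeds GRU_OPERATION_TIMEOUT. Returns the final handle status
 * and, if OPT_STATS is enabled, records how long the operation took.
 */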
static int wait_instruction_complete(void *h, enum mcs_op opc)
{
	int status;
	unsigned long start_time = get_cycles();

	while (1) {
		cpu_relax();
		status = GET_MSEG_HANDLE_STATUS(h);
		if (status != CCHSTATUS_ACTIVE)
			break;
		if (GRU_OPERATION_TIMEOUT < (get_cycles() - start_time)) {
			report_instruction_timeout(h);
			start_time = get_cycles();
		}
	}
	if (gru_options & OPT_STATS)
		update_mcs_stats(opc, get_cycles() - start_time);
	return status;
}

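/* Context configuration handle (CCH) operations */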
int cch_allocate(struct gru_context_configuration_handle *cch)
{
	int ret;

	cch->opc = CCHOP_ALLOCATE;
	start_instruction(cch);
	ret = wait_instruction_complete(cch, cchop_allocate);

	/*
	 * Stop speculation into the GSEG being mapped by the previous ALLOCATE.
	 * The GSEG memory does not exist until the ALLOCATE completes.
	 */
	sync_core();
	return ret;
}

int cch_start(struct gru_context_configuration_handle *cch)
{
	cch->opc = CCHOP_START;
	start_instruction(cch);
	return wait_instruction_complete(cch, cchop_start);
}

int cch_interrupt(struct gru_context_configuration_handle *cch)
{
	cch->opc = CCHOP_INTERRUPT;
	start_instruction(cch);
	return wait_instruction_complete(cch, cchop_interrupt);
}

int cch_deallocate(struct gru_context_configuration_handle *cch)
{
	int ret;

	cch->opc = CCHOP_DEALLOCATE;
	start_instruction(cch);
	ret = wait_instruction_complete(cch, cchop_deallocate);

	/*
	 * Stop speculation into the GSEG being unmapped by the previous
	 * DEALLOCATE.
	 */
	sync_core();
	return ret;
}

int cch_interrupt_sync(struct gru_context_configuration_handle *cch)
{
	cch->opc = CCHOP_INTERRUPT_SYNC;
	start_instruction(cch);
	return wait_instruction_complete(cch, cchop_interrupt_sync);
}

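/* Issue a TLB invalidate through a TLB global handle (TGH) and wait for it to complete */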
int tgh_invalidate(struct gru_tlb_global_handle *tgh,
		   unsigned long vaddr, unsigned long vaddrmask,
		   int asid, int pagesize, int global, int n,
		   unsigned short ctxbitmap)
{
	tgh->vaddr = vaddr;
	tgh->asid = asid;
	tgh->pagesize = pagesize;
	tgh->n = n;
	tgh->global = global;
	tgh->vaddrmask = vaddrmask;
	tgh->ctxbitmap = ctxbitmap;
	tgh->opc = TGHOP_TLBINV;
	start_instruction(tgh);
	return wait_instruction_complete(tgh, tghop_invalidate);
}

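/* TLB fault handle (TFH) operations */

/* Write a TLB entry through the fault handle and wait for the write to complete */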
int tfh_write_only(struct gru_tlb_fault_handle *tfh,
		   unsigned long paddr, int gaa,
		   unsigned long vaddr, int asid, int dirty,
		   int pagesize)
{
	tfh->fillasid = asid;
	tfh->fillvaddr = vaddr;
	tfh->pfn = paddr >> GRU_PADDR_SHIFT;
	tfh->gaa = gaa;
	tfh->dirty = dirty;
	tfh->pagesize = pagesize;
	tfh->opc = TFHOP_WRITE_ONLY;
	start_instruction(tfh);
	return wait_instruction_complete(tfh, tfhop_write_only);
}

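/*
 * Write a TLB entry using the WRITE_RESTART opcode, which also restarts the
 * faulting access. The operation is started but not waited for.
 */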
void tfh_write_restart(struct gru_tlb_fault_handle *tfh,
		       unsigned long paddr, int gaa,
		       unsigned long vaddr, int asid, int dirty,
		       int pagesize)
{
	tfh->fillasid = asid;
	tfh->fillvaddr = vaddr;
	tfh->pfn = paddr >> GRU_PADDR_SHIFT;
	tfh->gaa = gaa;
	tfh->dirty = dirty;
	tfh->pagesize = pagesize;
	tfh->opc = TFHOP_WRITE_RESTART;
	start_instruction(tfh);
}

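/* Switch the fault handle to user polling mode; the operation is started but not waited for */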
void tfh_user_polling_mode(struct gru_tlb_fault_handle *tfh)
{
	tfh->opc = TFHOP_USER_POLLING_MODE;
	start_instruction(tfh);
}

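/* Raise an exception on the fault handle; the operation is started but not waited for */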
void tfh_exception(struct gru_tlb_fault_handle *tfh)
{
	tfh->opc = TFHOP_EXCEPTION;
	start_instruction(tfh);
}