[PATCH] sata_sil: convert to new EH

Convert sata_sil to new EH.  As these controllers have a hardware
interrupt mask and are known to have screaming interrupt issues, use
hardware IRQ masking for freezing.  sil_freeze() masks interrupts for
the port and sil_thaw() unmasks them.  As ports are automatically
frozen before probing reset, there is no need to initialize interrupt
masks in sil_init_one().  Remove the related code.

Other than freezing, sata_sil uses stock BMDMA EH routines.
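
In outline, freezing and thawing only toggle the per-port IDE interrupt
mask bit in SIL_SYSCFG (the bit for port N is SIL_MASK_IDE0_INT << N).
A simplified sketch of what sil_freeze()/sil_thaw() below do, omitting
the status read and IRQ clearing that sil_thaw() performs first:

	u32 tmp = readl(mmio_base + SIL_SYSCFG);

	tmp |= SIL_MASK_IDE0_INT << ap->port_no;   /* freeze: mask port IRQ */
	/* ... or, to thaw ... */
	tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no); /* thaw: unmask port IRQ */

	writel(tmp, mmio_base + SIL_SYSCFG);
	readl(mmio_base + SIL_SYSCFG);              /* flush posted write */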

Signed-off-by: Tejun Heo <htejun@gmail.com>
Author: Tejun Heo
Date:   2006-05-15 20:58:27 +09:00
commit f6aae27ed0
parent 3f037db0ba

@@ -96,6 +96,8 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev);
 static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg);
 static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
 static void sil_post_set_mode (struct ata_port *ap);
+static void sil_freeze(struct ata_port *ap);
+static void sil_thaw(struct ata_port *ap);
 
 static const struct pci_device_id sil_pci_tbl[] = {
@@ -174,7 +176,10 @@ static const struct ata_port_operations sil_ops = {
 	.bmdma_status = ata_bmdma_status,
 	.qc_prep = ata_qc_prep,
 	.qc_issue = ata_qc_issue_prot,
-	.eng_timeout = ata_eng_timeout,
+	.freeze = sil_freeze,
+	.thaw = sil_thaw,
+	.error_handler = ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
 	.irq_handler = ata_interrupt,
 	.irq_clear = ata_bmdma_irq_clear,
 	.scr_read = sil_scr_read,
@@ -314,6 +319,33 @@ static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
 	writel(val, mmio);
 }
 
+static void sil_freeze(struct ata_port *ap)
+{
+	void __iomem *mmio_base = ap->host_set->mmio_base;
+	u32 tmp;
+
+	/* plug IRQ */
+	tmp = readl(mmio_base + SIL_SYSCFG);
+	tmp |= SIL_MASK_IDE0_INT << ap->port_no;
+	writel(tmp, mmio_base + SIL_SYSCFG);
+	readl(mmio_base + SIL_SYSCFG); /* flush */
+}
+
+static void sil_thaw(struct ata_port *ap)
+{
+	void __iomem *mmio_base = ap->host_set->mmio_base;
+	u32 tmp;
+
+	/* clear IRQ */
+	ata_chk_status(ap);
+	ata_bmdma_irq_clear(ap);
+
+	/* turn on IRQ */
+	tmp = readl(mmio_base + SIL_SYSCFG);
+	tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no);
+	writel(tmp, mmio_base + SIL_SYSCFG);
+}
+
 /**
  * sil_dev_config - Apply device/host-specific errata fixups
  * @ap: Port containing device to be examined
@@ -384,7 +416,7 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	int rc;
 	unsigned int i;
 	int pci_dev_busy = 0;
-	u32 tmp, irq_mask;
+	u32 tmp;
 	u8 cls;
 
 	if (!printed_version++)
@@ -474,24 +506,11 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 
 	if (ent->driver_data == sil_3114) {
-		irq_mask = SIL_MASK_4PORT;
-
 		/* flip the magic "make 4 ports work" bit */
 		tmp = readl(mmio_base + sil_port[2].bmdma);
 		if ((tmp & SIL_INTR_STEERING) == 0)
 			writel(tmp | SIL_INTR_STEERING,
 			       mmio_base + sil_port[2].bmdma);
-	} else {
-		irq_mask = SIL_MASK_2PORT;
-	}
-
-	/* make sure IDE0/1/2/3 interrupts are not masked */
-	tmp = readl(mmio_base + SIL_SYSCFG);
-	if (tmp & irq_mask) {
-		tmp &= ~irq_mask;
-		writel(tmp, mmio_base + SIL_SYSCFG);
-		readl(mmio_base + SIL_SYSCFG); /* flush */
-	}
 	}
 
 	/* mask all SATA phy-related interrupts */