ioat: cleanup ->timer_fn() and ->cleanup_fn() prototypes

If the calling convention of ->timer_fn() and ->cleanup_fn() is unified
across hardware versions, we can drop parameters to ioat_init_channel()
and unify the ioat_is_dma_complete() implementations. Both ->timer_fn()
and ->cleanup_fn() are modified to expect a struct dma_chan pointer.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
commit aa4d72ae94 (parent b9cc98697d)
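For readers skimming the diff below, the pattern being standardized is: every ->timer_fn()/->cleanup_fn() callback now receives the (unsigned long) cast of the channel's struct dma_chan, and each hardware version recovers its own container type from it (to_ioat_chan()/to_ioat2_chan() in the driver). The stand-alone sketch below is not the driver code: it uses hypothetical *_sketch types, keeps the callback pointer on the common channel rather than on struct ioatdma_device purely for brevity, and assumes the usual kernel-style pointer-in-unsigned-long cast.

/*
 * Minimal user-space sketch of the callback convention this commit adopts
 * (hypothetical *_sketch types, not the driver's own structures): callbacks
 * receive the common channel cast to "unsigned long" and each hardware
 * version recovers its container with container_of().
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dma_chan_sketch {		/* stands in for struct dma_chan */
	int id;
};

struct chan_common_sketch {		/* stands in for struct ioat_chan_common */
	struct dma_chan_sketch common;
	/* in the driver this pointer lives on struct ioatdma_device */
	void (*cleanup_fn)(unsigned long data);
};

struct v1_chan_sketch {			/* stands in for struct ioat_dma_chan */
	struct chan_common_sketch base;
	int xfercap;
};

/* Version-specific callback: cast back from the opaque data argument. */
static void v1_cleanup_event(unsigned long data)
{
	struct dma_chan_sketch *c = (struct dma_chan_sketch *)data;
	struct chan_common_sketch *chan =
		container_of(c, struct chan_common_sketch, common);
	struct v1_chan_sketch *ioat =
		container_of(chan, struct v1_chan_sketch, base);

	printf("cleanup on channel %d (xfercap %d)\n", c->id, ioat->xfercap);
}

/* Common init: no per-version callback/data parameters needed anymore. */
static void init_channel_sketch(struct chan_common_sketch *chan,
				void (*cleanup_fn)(unsigned long))
{
	unsigned long data = (unsigned long)&chan->common;

	chan->cleanup_fn = cleanup_fn;
	chan->cleanup_fn(data);	/* the driver hands this value to tasklet_init()/init_timer() */
}

int main(void)
{
	struct v1_chan_sketch ioat = { .base.common.id = 3, .xfercap = 4096 };

	init_channel_sketch(&ioat.base, v1_cleanup_event);
	return 0;
}

With one calling convention, version-agnostic code such as ioat_is_dma_complete() can simply invoke device->cleanup_fn((unsigned long) c) without knowing whether the channel is ioat v1, v2, or v3.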
drivers/dma/ioat/dma.c

@@ -94,16 +94,12 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static void ioat1_cleanup_tasklet(unsigned long data);
-
 /* common channel initialization */
-void ioat_init_channel(struct ioatdma_device *device,
-		       struct ioat_chan_common *chan, int idx,
-		       void (*timer_fn)(unsigned long),
-		       void (*tasklet)(unsigned long),
-		       unsigned long ioat)
+void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx)
 {
 	struct dma_device *dma = &device->common;
+	struct dma_chan *c = &chan->common;
+	unsigned long data = (unsigned long) c;
 
 	chan->device = device;
 	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
@@ -112,14 +108,12 @@ void ioat_init_channel(struct ioatdma_device *device,
 	list_add_tail(&chan->common.device_node, &dma->channels);
 	device->idx[idx] = chan;
 	init_timer(&chan->timer);
-	chan->timer.function = timer_fn;
-	chan->timer.data = ioat;
-	tasklet_init(&chan->cleanup_task, tasklet, ioat);
+	chan->timer.function = device->timer_fn;
+	chan->timer.data = data;
+	tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
 	tasklet_disable(&chan->cleanup_task);
 }
 
-static void ioat1_timer_event(unsigned long data);
-
 /**
  * ioat1_dma_enumerate_channels - find and initialize the device's channels
  * @device: the device to be enumerated
@@ -155,10 +149,7 @@ static int ioat1_enumerate_channels(struct ioatdma_device *device)
 		if (!ioat)
 			break;
 
-		ioat_init_channel(device, &ioat->base, i,
-				  ioat1_timer_event,
-				  ioat1_cleanup_tasklet,
-				  (unsigned long) ioat);
+		ioat_init_channel(device, &ioat->base, i);
 		ioat->xfercap = xfercap;
 		spin_lock_init(&ioat->desc_lock);
 		INIT_LIST_HEAD(&ioat->free_desc);
@@ -532,12 +523,12 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
 	return &desc->txd;
 }
 
-static void ioat1_cleanup_tasklet(unsigned long data)
+static void ioat1_cleanup_event(unsigned long data)
 {
-	struct ioat_dma_chan *chan = (void *)data;
+	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
 
-	ioat1_cleanup(chan);
-	writew(IOAT_CHANCTRL_RUN, chan->base.reg_base + IOAT_CHANCTRL_OFFSET);
+	ioat1_cleanup(ioat);
+	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
 }
 
 void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
@@ -687,7 +678,7 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat)
 
 static void ioat1_timer_event(unsigned long data)
 {
-	struct ioat_dma_chan *ioat = (void *) data;
+	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
 	struct ioat_chan_common *chan = &ioat->base;
 
 	dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);
@@ -734,16 +725,17 @@ static void ioat1_timer_event(unsigned long data)
 	spin_unlock_bh(&chan->cleanup_lock);
 }
 
-static enum dma_status
-ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie,
+enum dma_status
+ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
 		      dma_cookie_t *done, dma_cookie_t *used)
 {
-	struct ioat_dma_chan *ioat = to_ioat_chan(c);
+	struct ioat_chan_common *chan = to_chan_common(c);
+	struct ioatdma_device *device = chan->device;
 
 	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
 		return DMA_SUCCESS;
 
-	ioat1_cleanup(ioat);
+	device->cleanup_fn((unsigned long) c);
 
 	return ioat_is_complete(c, cookie, done, used);
 }
@@ -1199,12 +1191,14 @@ int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
 	device->intr_quirk = ioat1_intr_quirk;
 	device->enumerate_channels = ioat1_enumerate_channels;
 	device->self_test = ioat_dma_self_test;
+	device->timer_fn = ioat1_timer_event;
+	device->cleanup_fn = ioat1_cleanup_event;
 	dma = &device->common;
 	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
 	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
 	dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
 	dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
-	dma->device_is_tx_complete = ioat1_dma_is_complete;
+	dma->device_is_tx_complete = ioat_is_dma_complete;
 
 	err = ioat_probe(device);
 	if (err)
drivers/dma/ioat/dma.h

@@ -61,7 +61,7 @@
  * @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
  * @enumerate_channels: hw version specific channel enumeration
  * @reset_hw: hw version specific channel (re)initialization
- * @cleanup_tasklet: select between the v2 and v3 cleanup routines
+ * @cleanup_fn: select between the v2 and v3 cleanup routines
  * @timer_fn: select between the v2 and v3 timer watchdog routines
  * @self_test: hardware version specific self test for each supported op type
  *
@@ -80,7 +80,7 @@ struct ioatdma_device {
 	void (*intr_quirk)(struct ioatdma_device *device);
 	int (*enumerate_channels)(struct ioatdma_device *device);
 	int (*reset_hw)(struct ioat_chan_common *chan);
-	void (*cleanup_tasklet)(unsigned long data);
+	void (*cleanup_fn)(unsigned long data);
 	void (*timer_fn)(unsigned long data);
 	int (*self_test)(struct ioatdma_device *device);
 };
@@ -337,10 +337,9 @@ struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
 					      void __iomem *iobase);
 unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
 void ioat_init_channel(struct ioatdma_device *device,
-		       struct ioat_chan_common *chan, int idx,
-		       void (*timer_fn)(unsigned long),
-		       void (*tasklet)(unsigned long),
-		       unsigned long ioat);
+		       struct ioat_chan_common *chan, int idx);
+enum dma_status ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
+				     dma_cookie_t *done, dma_cookie_t *used);
 void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
 		    size_t len, struct ioat_dma_descriptor *hw);
 bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
drivers/dma/ioat/dma_v2.c

@@ -199,9 +199,9 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
 	spin_unlock_bh(&chan->cleanup_lock);
 }
 
-void ioat2_cleanup_tasklet(unsigned long data)
+void ioat2_cleanup_event(unsigned long data)
 {
-	struct ioat2_dma_chan *ioat = (void *) data;
+	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
 
 	ioat2_cleanup(ioat);
 	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
@@ -283,7 +283,7 @@ static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
 
 void ioat2_timer_event(unsigned long data)
 {
-	struct ioat2_dma_chan *ioat = (void *) data;
+	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
 	struct ioat_chan_common *chan = &ioat->base;
 
 	spin_lock_bh(&chan->cleanup_lock);
@@ -389,10 +389,7 @@ int ioat2_enumerate_channels(struct ioatdma_device *device)
 		if (!ioat)
 			break;
 
-		ioat_init_channel(device, &ioat->base, i,
-				  device->timer_fn,
-				  device->cleanup_tasklet,
-				  (unsigned long) ioat);
+		ioat_init_channel(device, &ioat->base, i);
 		ioat->xfercap_log = xfercap_log;
 		spin_lock_init(&ioat->ring_lock);
 		if (device->reset_hw(&ioat->base)) {
@@ -692,7 +689,7 @@ int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs)
 
 			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
 			spin_unlock_bh(&chan->cleanup_lock);
-			device->timer_fn((unsigned long) ioat);
+			device->timer_fn((unsigned long) &chan->common);
 		} else
 			spin_unlock_bh(&chan->cleanup_lock);
 		return -ENOMEM;
@@ -776,7 +773,7 @@ void ioat2_free_chan_resources(struct dma_chan *c)
 
 	tasklet_disable(&chan->cleanup_task);
 	del_timer_sync(&chan->timer);
-	device->cleanup_tasklet((unsigned long) ioat);
+	device->cleanup_fn((unsigned long) c);
 	device->reset_hw(chan);
 
 	spin_lock_bh(&ioat->ring_lock);
@@ -809,21 +806,6 @@ void ioat2_free_chan_resources(struct dma_chan *c)
 	ioat->dmacount = 0;
 }
 
-enum dma_status
-ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
-		     dma_cookie_t *done, dma_cookie_t *used)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioatdma_device *device = ioat->base.device;
-
-	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
-		return DMA_SUCCESS;
-
-	device->cleanup_tasklet((unsigned long) ioat);
-
-	return ioat_is_complete(c, cookie, done, used);
-}
-
 static ssize_t ring_size_show(struct dma_chan *c, char *page)
 {
 	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
@@ -864,7 +846,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
 
 	device->enumerate_channels = ioat2_enumerate_channels;
 	device->reset_hw = ioat2_reset_hw;
-	device->cleanup_tasklet = ioat2_cleanup_tasklet;
+	device->cleanup_fn = ioat2_cleanup_event;
 	device->timer_fn = ioat2_timer_event;
 	device->self_test = ioat_dma_self_test;
 	dma = &device->common;
@@ -872,7 +854,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
 	dma->device_issue_pending = ioat2_issue_pending;
 	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
 	dma->device_free_chan_resources = ioat2_free_chan_resources;
-	dma->device_is_tx_complete = ioat2_is_complete;
+	dma->device_is_tx_complete = ioat_is_dma_complete;
 
 	err = ioat_probe(device);
 	if (err)
drivers/dma/ioat/dma_v2.h

@@ -176,12 +176,10 @@ ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
 void ioat2_issue_pending(struct dma_chan *chan);
 int ioat2_alloc_chan_resources(struct dma_chan *c);
 void ioat2_free_chan_resources(struct dma_chan *c);
-enum dma_status ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
-				  dma_cookie_t *done, dma_cookie_t *used);
 void __ioat2_restart_chan(struct ioat2_dma_chan *ioat);
 bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
 void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
-void ioat2_cleanup_tasklet(unsigned long data);
+void ioat2_cleanup_event(unsigned long data);
 void ioat2_timer_event(unsigned long data);
 int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
 int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);
drivers/dma/ioat/dma_v3.c

@@ -358,9 +358,9 @@ static void ioat3_cleanup_sync(struct ioat2_dma_chan *ioat)
 	spin_unlock_bh(&chan->cleanup_lock);
 }
 
-static void ioat3_cleanup_tasklet(unsigned long data)
+static void ioat3_cleanup_event(unsigned long data)
 {
-	struct ioat2_dma_chan *ioat = (void *) data;
+	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
 
 	ioat3_cleanup_sync(ioat);
 	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
@@ -380,7 +380,7 @@ static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
 
 static void ioat3_timer_event(unsigned long data)
 {
-	struct ioat2_dma_chan *ioat = (void *) data;
+	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
 	struct ioat_chan_common *chan = &ioat->base;
 
 	spin_lock_bh(&chan->cleanup_lock);
@@ -1259,11 +1259,11 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 
 	if (is_raid_device) {
 		dma->device_is_tx_complete = ioat3_is_complete;
-		device->cleanup_tasklet = ioat3_cleanup_tasklet;
+		device->cleanup_fn = ioat3_cleanup_event;
 		device->timer_fn = ioat3_timer_event;
 	} else {
-		dma->device_is_tx_complete = ioat2_is_complete;
-		device->cleanup_tasklet = ioat2_cleanup_tasklet;
+		dma->device_is_tx_complete = ioat_is_dma_complete;
+		device->cleanup_fn = ioat2_cleanup_event;
 		device->timer_fn = ioat2_timer_event;
 	}
 