Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull misc block fixes from Jens Axboe:
 "Stuff that got collected after the merge window opened.  This
  contains:
   - NVMe:
        - Fix for non-striped transfer size setting for NVMe from
          Sathyavathi.
        - (Some) support for the weird Apple nvme controller in the
          macbooks. From Stephan Günther.
   - The error value leak for dax from Al.
   - A few minor blk-mq tweaks from me.
   - Add the new linux-block@vger.kernel.org mailing list to the
     MAINTAINERS file.
   - Discard fix for brd, from Jan.
   - A kerneldoc warning for block core from Randy.
   - An older fix from Vivek, converting a WARN_ON() to a rate limited
     printk when a device is hot removed with dirty inodes"
* 'for-linus' of git://git.kernel.dk/linux-block:
  block: don't hardcode blk_qc_t -> tag mask
  dax_io(): don't let non-error value escape via retval instead of EFAULT
  block: fix blk-core.c kernel-doc warning
  fs/block_dev.c: Remove WARN_ON() when inode writeback fails
  NVMe: add support for Apple NVMe controller
  NVMe: use split lo_hi_{read,write}q
  blk-mq: mark __blk_mq_complete_request() static
  MAINTAINERS: add reference to new linux-block list
  NVMe: Increase the max transfer size when mdts is 0
  brd: Refuse improperly aligned discard requests
			
			
commit 5e2078b289

MAINTAINERS
@@ -2210,6 +2210,7 @@ F:	drivers/leds/leds-blinkm.c
 
 BLOCK LAYER
 M:	Jens Axboe <axboe@kernel.dk>
+L:	linux-block@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 S:	Maintained
 F:	block/

block/blk-core.c
@@ -1575,6 +1575,9 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
  * @q: request_queue new bio is being queued at
  * @bio: new bio being queued
  * @request_count: out parameter for number of traversed plugged requests
+ * @same_queue_rq: pointer to &struct request that gets filled in when
+ * another request associated with @q is found on the plug list
+ * (optional, may be %NULL)
  *
  * Determine whether @bio being queued on @q can be merged with a request
  * on %current's plugged list.  Returns %true if merge was successful,
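
For reference, the kernel-doc notation used above: @name documents a parameter, &struct foo cross-references a structure, and %FOO marks a constant such as %NULL or %true; the warning fixed here came from @same_queue_rq being undocumented. A minimal sketch of the format, with a hypothetical function name:

/**
 * example_merge_check() - decide whether a new bio can be merged
 * @q: request queue the new bio is being queued at
 * @same_queue_rq: filled in on a same-queue hit (optional, may be %NULL)
 *
 * Returns %true if the merge succeeded.
 */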

block/blk-mq.c
@@ -358,7 +358,7 @@ static void blk_mq_ipi_complete_request(struct request *rq)
 	put_cpu();
 }
 
-void __blk_mq_complete_request(struct request *rq)
+static void __blk_mq_complete_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
 

block/blk-mq.h
@@ -25,7 +25,6 @@ struct blk_mq_ctx {
 	struct kobject		kobj;
 } ____cacheline_aligned_in_smp;
 
-void __blk_mq_complete_request(struct request *rq);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_free_queue(struct request_queue *q);

drivers/block/brd.c
@@ -337,6 +337,9 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
 		goto io_error;
 
 	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
+		if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) ||
+		    bio->bi_iter.bi_size & ~PAGE_MASK)
+			goto io_error;
 		discard_from_brd(brd, sector, bio->bi_iter.bi_size);
 		goto out;
 	}
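
brd backs the ramdisk with whole pages, so a discard must start on a page boundary and span a whole number of pages; anything else now fails with an I/O error instead of silently discarding more or less than was asked for. A stand-alone sketch of the same check, assuming 4 KiB pages and 512-byte sectors (SECTOR_SHIFT == 9); the helper name is hypothetical:

#include <stdbool.h>
#include <stdint.h>

#define SECTOR_SHIFT	9
#define PAGE_SIZE	4096u

static bool brd_discard_aligned(uint64_t sector, uint32_t bytes)
{
	/* sector must be a multiple of PAGE_SIZE >> SECTOR_SHIFT (here 8),
	 * and the byte count a multiple of PAGE_SIZE */
	return !(sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1)) &&
	       !(bytes & (PAGE_SIZE - 1));
}

/* brd_discard_aligned(8, 8192) -> true; brd_discard_aligned(1, 4096) -> false */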

drivers/nvme/host/pci.c
@@ -1725,7 +1725,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 {
 	int result;
 	u32 aqa;
-	u64 cap = readq(&dev->bar->cap);
+	u64 cap = lo_hi_readq(&dev->bar->cap);
 	struct nvme_queue *nvmeq;
 	unsigned page_shift = PAGE_SHIFT;
 	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;

@@ -1774,8 +1774,8 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
 
 	writel(aqa, &dev->bar->aqa);
-	writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
-	writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
+	lo_hi_writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
+	lo_hi_writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
 
 	result = nvme_enable_ctrl(dev, cap);
 	if (result)
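
readq()/writeq() are only native on platforms with 64-bit MMIO, and the Apple controller added below apparently needs these registers driven 32 bits at a time. The lo_hi variants make the split explicit: two 32-bit accesses, low word first. A sketch of what the accessors from include/linux/io-64-nonatomic-lo-hi.h boil down to:

static inline u64 lo_hi_readq(const volatile void __iomem *addr)
{
	const volatile u32 __iomem *p = addr;
	u32 low, high;

	low = readl(p);
	high = readl(p + 1);

	return low + ((u64)high << 32);
}

static inline void lo_hi_writeq(u64 val, volatile void __iomem *addr)
{
	writel(val, addr);
	writel(val >> 32, addr + 4);
}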

@@ -2606,7 +2606,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	int res;
 	struct nvme_id_ctrl *ctrl;
-	int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
+	int shift = NVME_CAP_MPSMIN(lo_hi_readq(&dev->bar->cap)) + 12;
 
 	res = nvme_identify_ctrl(dev, &ctrl);
 	if (res) {

@@ -2622,6 +2622,8 @@ static int nvme_dev_add(struct nvme_dev *dev)
 	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
 	if (ctrl->mdts)
 		dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
+	else
+		dev->max_hw_sectors = UINT_MAX;
 	if ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
 			(pdev->device == 0x0953) && ctrl->vs[3]) {
 		unsigned int max_hw_sectors;
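
MDTS is a power of two in units of the controller's minimum page size (2^(12 + CAP.MPSMIN) bytes), hence the 1 << (mdts + shift - 9) conversion to 512-byte sectors. A worked example with assumed values:

	unsigned shift = 0 + 12;	/* CAP.MPSMIN = 0 -> 4096-byte pages */
	unsigned mdts = 5;		/* 4096 << 5 = 128 KiB max transfer */
	unsigned max_hw_sectors = 1 << (mdts + shift - 9);	/* = 256 */

An mdts of 0 means the controller reports no limit at all, which previously left transfers capped at the block-layer default; the fix spells that case out as UINT_MAX.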

@@ -2695,7 +2697,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
 			goto unmap;
 	}
 
-	cap = readq(&dev->bar->cap);
+	cap = lo_hi_readq(&dev->bar->cap);
 	dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
 	dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
 	dev->dbs = ((void __iomem *)dev->bar) + 4096;

@@ -2758,7 +2760,7 @@ static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
 			 * queues than admin tags.
 			 */
 			set_current_state(TASK_RUNNING);
-			nvme_disable_ctrl(dev, readq(&dev->bar->cap));
+			nvme_disable_ctrl(dev, lo_hi_readq(&dev->bar->cap));
 			nvme_clear_queue(dev->queues[0]);
 			flush_kthread_worker(dq->worker);
 			nvme_disable_queue(dev, 0);

@@ -3401,6 +3403,7 @@ static const struct pci_error_handlers nvme_err_handler = {
 
 static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
 	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, nvme_id_table);

fs/block_dev.c
@@ -50,12 +50,21 @@ struct block_device *I_BDEV(struct inode *inode)
 }
 EXPORT_SYMBOL(I_BDEV);
 
-static void bdev_write_inode(struct inode *inode)
+static void bdev_write_inode(struct block_device *bdev)
 {
+	struct inode *inode = bdev->bd_inode;
+	int ret;
+
 	spin_lock(&inode->i_lock);
 	while (inode->i_state & I_DIRTY) {
 		spin_unlock(&inode->i_lock);
-		WARN_ON_ONCE(write_inode_now(inode, true));
+		ret = write_inode_now(inode, true);
+		if (ret) {
+			char name[BDEVNAME_SIZE];
+			pr_warn_ratelimited("VFS: Dirty inode writeback failed "
+					    "for block device %s (err=%d).\n",
+					    bdevname(bdev, name), ret);
+		}
 		spin_lock(&inode->i_lock);
 	}
 	spin_unlock(&inode->i_lock);
@@ -1504,7 +1513,7 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
 		 * ->release can cause the queue to disappear, so flush all
 		 * dirty data before.
 		 */
-		bdev_write_inode(bdev->bd_inode);
+		bdev_write_inode(bdev);
 	}
 	if (bdev->bd_contains == bdev) {
 		if (disk->fops->release)
							
								
								
									
fs/dax.c
@@ -174,8 +174,10 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
 		else
 			len = iov_iter_zero(max - pos, iter);
 
-		if (!len)
+		if (!len) {
+			retval = -EFAULT;
 			break;
+		}
 
 		pos += len;
 		addr += len;
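
The new -EFAULT cannot clobber a successful partial transfer: in this era dax_io() returns the number of bytes moved whenever any progress was made and only falls back to retval when none was, ending roughly with:

	return (pos == start) ? retval : pos - start;

so the error value only escapes when the very first copy fails outright.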

include/linux/blk_types.h
@@ -265,7 +265,7 @@ static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
 
 static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
 {
-	return cookie & 0xffff;
+	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
 }
 
 #endif /* __LINUX_BLK_TYPES_H */
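
A blk_qc_t cookie packs the hardware queue index in the bits above BLK_QC_T_SHIFT (16 in this tree) and the tag in the bits below it, so deriving the mask from the shift keeps both halves of the encoding tied to one constant. The constructor counterpart in the same header looks like:

static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num)
{
	return tag | (queue_num << BLK_QC_T_SHIFT);
}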