lightnvm: move metadata mapping to lower level driver
Now that blk_rq_map_kern can map both kmem and vmem, move internal
metadata mapping down to the lower level driver.

Reviewed-by: Javier González <javier@javigon.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Hans Holmberg <hans@owltronix.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 98d87f70f4
commit 48e5da7255
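To see the new calling convention in one place, here is an illustrative sketch (not part of the patch; the function name is invented): a target such as pblk no longer builds and maps a bio for internal metadata. It hands the raw kernel buffer, kmalloc'ed or vmalloc'ed alike, to pblk_submit_io_sync()/nvm_submit_io_sync(), and the lower level driver maps it onto the request with blk_rq_map_kern().

/*
 * Hypothetical caller, modeled on pblk_line_smeta_read() from the diff
 * below. line->smeta may be kmalloc'ed or vmalloc'ed; either way it is
 * passed straight through, and nvme_nvm_submit_io() maps it onto the
 * request with blk_rq_map_kern(). No bio_map_kern()/pblk_bio_map_addr()
 * at this level any more.
 */
static int example_smeta_read(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct nvm_rq rqd = {};
	int ret;

	ret = pblk_alloc_rqd_meta(pblk, &rqd);
	if (ret)
		return ret;

	rqd.opcode = NVM_OP_PREAD;
	rqd.nr_ppas = lm->smeta_sec;
	rqd.is_seq = 1;
	/* ... fill the rqd ppa_list as in pblk_line_smeta_read() ... */

	ret = pblk_submit_io_sync(pblk, &rqd, line->smeta);
	if (ret)
		pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);

	pblk_free_rqd_meta(pblk, &rqd);
	return ret;
}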
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -731,7 +731,7 @@ static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
 	return flags;
 }
 
-int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
+int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd, void *buf)
 {
 	struct nvm_dev *dev = tgt_dev->parent;
 	int ret;
@@ -745,7 +745,7 @@ int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
 	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);
 
 	/* In case of error, fail with right address format */
-	ret = dev->ops->submit_io(dev, rqd);
+	ret = dev->ops->submit_io(dev, rqd, buf);
 	if (ret)
 		nvm_rq_dev_to_tgt(tgt_dev, rqd);
 	return ret;
@@ -759,7 +759,8 @@ static void nvm_sync_end_io(struct nvm_rq *rqd)
 	complete(waiting);
 }
 
-static int nvm_submit_io_wait(struct nvm_dev *dev, struct nvm_rq *rqd)
+static int nvm_submit_io_wait(struct nvm_dev *dev, struct nvm_rq *rqd,
+			      void *buf)
 {
 	DECLARE_COMPLETION_ONSTACK(wait);
 	int ret = 0;
@@ -767,7 +768,7 @@ static int nvm_submit_io_wait(struct nvm_dev *dev, struct nvm_rq *rqd)
 	rqd->end_io = nvm_sync_end_io;
 	rqd->private = &wait;
 
-	ret = dev->ops->submit_io(dev, rqd);
+	ret = dev->ops->submit_io(dev, rqd, buf);
 	if (ret)
 		return ret;
 
@@ -776,7 +777,8 @@ static int nvm_submit_io_wait(struct nvm_dev *dev, struct nvm_rq *rqd)
 	return 0;
 }
 
-int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
+int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
+		       void *buf)
 {
 	struct nvm_dev *dev = tgt_dev->parent;
 	int ret;
@@ -789,7 +791,7 @@ int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
 	rqd->dev = tgt_dev;
 	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);
 
-	ret = nvm_submit_io_wait(dev, rqd);
+	ret = nvm_submit_io_wait(dev, rqd, buf);
 
 	return ret;
 }
@@ -816,7 +818,7 @@ static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
 	rqd->dev = NULL;
 	rqd->flags = nvm_set_flags(&dev->geo, rqd);
 
-	return nvm_submit_io_wait(dev, rqd);
+	return nvm_submit_io_wait(dev, rqd, NULL);
 }
 
 static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -507,7 +507,7 @@ void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
 	pblk->sec_per_write = sec_per_write;
 }
 
-int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
+int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd, void *buf)
 {
 	struct nvm_tgt_dev *dev = pblk->dev;
 
@@ -518,7 +518,7 @@ int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
 		return NVM_IO_ERR;
 #endif
 
-	return nvm_submit_io(dev, rqd);
+	return nvm_submit_io(dev, rqd, buf);
 }
 
 void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd)
@@ -541,7 +541,7 @@ void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd)
 	}
 }
 
-int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
+int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd, void *buf)
 {
 	struct nvm_tgt_dev *dev = pblk->dev;
 	int ret;
@@ -553,7 +553,7 @@ int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
 		return NVM_IO_ERR;
 #endif
 
-	ret = nvm_submit_io_sync(dev, rqd);
+	ret = nvm_submit_io_sync(dev, rqd, buf);
 
 	if (trace_pblk_chunk_state_enabled() && !ret &&
 	    rqd->opcode == NVM_OP_PWRITE)
@@ -562,65 +562,19 @@ int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
 	return ret;
 }
 
-int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd)
+static int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd,
+				   void *buf)
 {
 	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
 	int ret;
 
 	pblk_down_chunk(pblk, ppa_list[0]);
-	ret = pblk_submit_io_sync(pblk, rqd);
+	ret = pblk_submit_io_sync(pblk, rqd, buf);
 	pblk_up_chunk(pblk, ppa_list[0]);
 
 	return ret;
 }
 
-static void pblk_bio_map_addr_endio(struct bio *bio)
-{
-	bio_put(bio);
-}
-
-struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
-			      unsigned int nr_secs, unsigned int len,
-			      int alloc_type, gfp_t gfp_mask)
-{
-	struct nvm_tgt_dev *dev = pblk->dev;
-	void *kaddr = data;
-	struct page *page;
-	struct bio *bio;
-	int i, ret;
-
-	if (alloc_type == PBLK_KMALLOC_META)
-		return bio_map_kern(dev->q, kaddr, len, gfp_mask);
-
-	bio = bio_kmalloc(gfp_mask, nr_secs);
-	if (!bio)
-		return ERR_PTR(-ENOMEM);
-
-	for (i = 0; i < nr_secs; i++) {
-		page = vmalloc_to_page(kaddr);
-		if (!page) {
-			pblk_err(pblk, "could not map vmalloc bio\n");
-			bio_put(bio);
-			bio = ERR_PTR(-ENOMEM);
-			goto out;
-		}
-
-		ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
-		if (ret != PAGE_SIZE) {
-			pblk_err(pblk, "could not add page to bio\n");
-			bio_put(bio);
-			bio = ERR_PTR(-ENOMEM);
-			goto out;
-		}
-
-		kaddr += PAGE_SIZE;
-	}
-
-	bio->bi_end_io = pblk_bio_map_addr_endio;
-out:
-	return bio;
-}
-
 int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
 		   unsigned long secs_to_flush, bool skip_meta)
 {
@@ -722,9 +676,7 @@ u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
 
 int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
 {
-	struct nvm_tgt_dev *dev = pblk->dev;
 	struct pblk_line_meta *lm = &pblk->lm;
-	struct bio *bio;
 	struct ppa_addr *ppa_list;
 	struct nvm_rq rqd;
 	u64 paddr = pblk_line_smeta_start(pblk, line);
@@ -736,16 +688,6 @@ int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
 	if (ret)
 		return ret;
 
-	bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
-	if (IS_ERR(bio)) {
-		ret = PTR_ERR(bio);
-		goto clear_rqd;
-	}
-
-	bio->bi_iter.bi_sector = 0; /* internal bio */
-	bio_set_op_attrs(bio, REQ_OP_READ, 0);
-
-	rqd.bio = bio;
 	rqd.opcode = NVM_OP_PREAD;
 	rqd.nr_ppas = lm->smeta_sec;
 	rqd.is_seq = 1;
@@ -754,10 +696,9 @@ int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
 	for (i = 0; i < lm->smeta_sec; i++, paddr++)
 		ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
 
-	ret = pblk_submit_io_sync(pblk, &rqd);
+	ret = pblk_submit_io_sync(pblk, &rqd, line->smeta);
 	if (ret) {
 		pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
-		bio_put(bio);
 		goto clear_rqd;
 	}
 
@@ -776,9 +717,7 @@ clear_rqd:
 static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
 				 u64 paddr)
 {
-	struct nvm_tgt_dev *dev = pblk->dev;
 	struct pblk_line_meta *lm = &pblk->lm;
-	struct bio *bio;
 	struct ppa_addr *ppa_list;
 	struct nvm_rq rqd;
 	__le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
@@ -791,16 +730,6 @@ static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
 	if (ret)
 		return ret;
 
-	bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
-	if (IS_ERR(bio)) {
-		ret = PTR_ERR(bio);
-		goto clear_rqd;
-	}
-
-	bio->bi_iter.bi_sector = 0; /* internal bio */
-	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-
-	rqd.bio = bio;
 	rqd.opcode = NVM_OP_PWRITE;
 	rqd.nr_ppas = lm->smeta_sec;
 	rqd.is_seq = 1;
@@ -814,10 +743,9 @@ static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
 		meta->lba = lba_list[paddr] = addr_empty;
 	}
 
-	ret = pblk_submit_io_sync_sem(pblk, &rqd);
+	ret = pblk_submit_io_sync_sem(pblk, &rqd, line->smeta);
 	if (ret) {
 		pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
-		bio_put(bio);
 		goto clear_rqd;
 	}
 
@@ -838,10 +766,8 @@ int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
 {
 	struct nvm_tgt_dev *dev = pblk->dev;
 	struct nvm_geo *geo = &dev->geo;
-	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
 	struct pblk_line_meta *lm = &pblk->lm;
 	void *ppa_list_buf, *meta_list;
-	struct bio *bio;
 	struct ppa_addr *ppa_list;
 	struct nvm_rq rqd;
 	u64 paddr = line->emeta_ssec;
@@ -867,17 +793,6 @@ next_rq:
 	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
 	rq_len = rq_ppas * geo->csecs;
 
-	bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
-					l_mg->emeta_alloc_type, GFP_KERNEL);
-	if (IS_ERR(bio)) {
-		ret = PTR_ERR(bio);
-		goto free_rqd_dma;
-	}
-
-	bio->bi_iter.bi_sector = 0; /* internal bio */
-	bio_set_op_attrs(bio, REQ_OP_READ, 0);
-
-	rqd.bio = bio;
 	rqd.meta_list = meta_list;
 	rqd.ppa_list = ppa_list_buf;
 	rqd.dma_meta_list = dma_meta_list;
@@ -896,7 +811,6 @@ next_rq:
 		while (test_bit(pos, line->blk_bitmap)) {
 			paddr += min;
 			if (pblk_boundary_paddr_checks(pblk, paddr)) {
-				bio_put(bio);
 				ret = -EINTR;
 				goto free_rqd_dma;
 			}
@@ -906,7 +820,6 @@ next_rq:
 		}
 
 		if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
-			bio_put(bio);
 			ret = -EINTR;
 			goto free_rqd_dma;
 		}
@@ -915,10 +828,9 @@ next_rq:
 			ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id);
 	}
 
-	ret = pblk_submit_io_sync(pblk, &rqd);
+	ret = pblk_submit_io_sync(pblk, &rqd, emeta_buf);
 	if (ret) {
 		pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
-		bio_put(bio);
 		goto free_rqd_dma;
 	}
 
@@ -963,7 +875,7 @@ static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
 	/* The write thread schedules erases so that it minimizes disturbances
 	 * with writes. Thus, there is no need to take the LUN semaphore.
 	 */
-	ret = pblk_submit_io_sync(pblk, &rqd);
+	ret = pblk_submit_io_sync(pblk, &rqd, NULL);
 	rqd.private = pblk;
 	__pblk_end_io_erase(pblk, &rqd);
 
@@ -1792,7 +1704,7 @@ int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
 	/* The write thread schedules erases so that it minimizes disturbances
 	 * with writes. Thus, there is no need to take the LUN semaphore.
 	 */
-	err = pblk_submit_io(pblk, rqd);
+	err = pblk_submit_io(pblk, rqd, NULL);
 	if (err) {
 		struct nvm_tgt_dev *dev = pblk->dev;
 		struct nvm_geo *geo = &dev->geo;
@@ -1923,7 +1835,6 @@ void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
 static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
 {
 	struct pblk_line_meta *lm = &pblk->lm;
-	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
 	unsigned int lba_list_size = lm->emeta_len[2];
 	struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
 	struct pblk_emeta *emeta = line->emeta;
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -342,7 +342,7 @@ split_retry:
 		bio_put(int_bio);
 		int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
 		goto split_retry;
-	} else if (pblk_submit_io(pblk, rqd)) {
+	} else if (pblk_submit_io(pblk, rqd, NULL)) {
 		/* Submitting IO to drive failed, let's report an error */
 		rqd->error = -ENODEV;
 		pblk_end_io_read(rqd);
@@ -419,7 +419,6 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
 {
 	struct nvm_tgt_dev *dev = pblk->dev;
 	struct nvm_geo *geo = &dev->geo;
-	struct bio *bio;
 	struct nvm_rq rqd;
 	int data_len;
 	int ret = NVM_IO_OK;
@@ -447,25 +446,12 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
 		goto out;
 
 	data_len = (gc_rq->secs_to_gc) * geo->csecs;
-	bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
-						PBLK_VMALLOC_META, GFP_KERNEL);
-	if (IS_ERR(bio)) {
-		pblk_err(pblk, "could not allocate GC bio (%lu)\n",
-								PTR_ERR(bio));
-		ret = PTR_ERR(bio);
-		goto err_free_dma;
-	}
-
-	bio->bi_iter.bi_sector = 0; /* internal bio */
-	bio_set_op_attrs(bio, REQ_OP_READ, 0);
-
 	rqd.opcode = NVM_OP_PREAD;
 	rqd.nr_ppas = gc_rq->secs_to_gc;
-	rqd.bio = bio;
 
-	if (pblk_submit_io_sync(pblk, &rqd)) {
+	if (pblk_submit_io_sync(pblk, &rqd, gc_rq->data)) {
 		ret = -EIO;
-		goto err_free_bio;
+		goto err_free_dma;
 	}
 
 	pblk_read_check_rand(pblk, &rqd, gc_rq->lba_list, gc_rq->nr_secs);
@@ -489,8 +475,6 @@ out:
 	pblk_free_rqd_meta(pblk, &rqd);
 	return ret;
 
-err_free_bio:
-	bio_put(bio);
 err_free_dma:
 	pblk_free_rqd_meta(pblk, &rqd);
 	return ret;
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -178,12 +178,11 @@ static int pblk_recov_pad_line(struct pblk *pblk, struct pblk_line *line,
 	void *meta_list;
 	struct pblk_pad_rq *pad_rq;
 	struct nvm_rq *rqd;
-	struct bio *bio;
 	struct ppa_addr *ppa_list;
 	void *data;
 	__le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
 	u64 w_ptr = line->cur_sec;
-	int left_line_ppas, rq_ppas, rq_len;
+	int left_line_ppas, rq_ppas;
 	int i, j;
 	int ret = 0;
 
@@ -212,28 +211,15 @@ next_pad_rq:
 		goto fail_complete;
 	}
 
-	rq_len = rq_ppas * geo->csecs;
-
-	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
-						PBLK_VMALLOC_META, GFP_KERNEL);
-	if (IS_ERR(bio)) {
-		ret = PTR_ERR(bio);
-		goto fail_complete;
-	}
-
-	bio->bi_iter.bi_sector = 0; /* internal bio */
-	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-
 	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);
 
 	ret = pblk_alloc_rqd_meta(pblk, rqd);
 	if (ret) {
 		pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
-		bio_put(bio);
 		goto fail_complete;
 	}
 
-	rqd->bio = bio;
+	rqd->bio = NULL;
 	rqd->opcode = NVM_OP_PWRITE;
 	rqd->is_seq = 1;
 	rqd->nr_ppas = rq_ppas;
@@ -275,13 +261,12 @@ next_pad_rq:
 	kref_get(&pad_rq->ref);
 	pblk_down_chunk(pblk, ppa_list[0]);
 
-	ret = pblk_submit_io(pblk, rqd);
+	ret = pblk_submit_io(pblk, rqd, data);
 	if (ret) {
 		pblk_err(pblk, "I/O submission failed: %d\n", ret);
 		pblk_up_chunk(pblk, ppa_list[0]);
 		kref_put(&pad_rq->ref, pblk_recov_complete);
 		pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
-		bio_put(bio);
 		goto fail_complete;
 	}
 
@@ -375,7 +360,6 @@ static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
 	struct ppa_addr *ppa_list;
 	void *meta_list;
 	struct nvm_rq *rqd;
-	struct bio *bio;
 	void *data;
 	dma_addr_t dma_ppa_list, dma_meta_list;
 	__le64 *lba_list;
@@ -407,15 +391,7 @@ next_rq:
 	rq_len = rq_ppas * geo->csecs;
 
 retry_rq:
-	bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
-	if (IS_ERR(bio))
-		return PTR_ERR(bio);
-
-	bio->bi_iter.bi_sector = 0; /* internal bio */
-	bio_set_op_attrs(bio, REQ_OP_READ, 0);
-	bio_get(bio);
-
-	rqd->bio = bio;
+	rqd->bio = NULL;
 	rqd->opcode = NVM_OP_PREAD;
 	rqd->meta_list = meta_list;
 	rqd->nr_ppas = rq_ppas;
@@ -445,10 +421,9 @@ retry_rq:
 				addr_to_gen_ppa(pblk, paddr + j, line->id);
 	}
 
-	ret = pblk_submit_io_sync(pblk, rqd);
+	ret = pblk_submit_io_sync(pblk, rqd, data);
 	if (ret) {
 		pblk_err(pblk, "I/O submission failed: %d\n", ret);
-		bio_put(bio);
 		return ret;
 	}
 
@@ -460,24 +435,20 @@ retry_rq:
 
 		if (padded) {
 			pblk_log_read_err(pblk, rqd);
-			bio_put(bio);
 			return -EINTR;
 		}
 
 		pad_distance = pblk_pad_distance(pblk, line);
 		ret = pblk_recov_pad_line(pblk, line, pad_distance);
 		if (ret) {
-			bio_put(bio);
 			return ret;
 		}
 
 		padded = true;
-		bio_put(bio);
 		goto retry_rq;
 	}
 
 	pblk_get_packed_meta(pblk, rqd);
-	bio_put(bio);
 
 	for (i = 0; i < rqd->nr_ppas; i++) {
 		struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
--- a/drivers/lightnvm/pblk-write.c
+++ b/drivers/lightnvm/pblk-write.c
@@ -373,7 +373,6 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
 	struct pblk_emeta *emeta = meta_line->emeta;
 	struct ppa_addr *ppa_list;
 	struct pblk_g_ctx *m_ctx;
-	struct bio *bio;
 	struct nvm_rq *rqd;
 	void *data;
 	u64 paddr;
@@ -391,20 +390,9 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
 	rq_len = rq_ppas * geo->csecs;
 	data = ((void *)emeta->buf) + emeta->mem;
 
-	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
-					l_mg->emeta_alloc_type, GFP_KERNEL);
-	if (IS_ERR(bio)) {
-		pblk_err(pblk, "failed to map emeta io");
-		ret = PTR_ERR(bio);
-		goto fail_free_rqd;
-	}
-	bio->bi_iter.bi_sector = 0; /* internal bio */
-	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-	rqd->bio = bio;
-
 	ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
 	if (ret)
-		goto fail_free_bio;
+		goto fail_free_rqd;
 
 	ppa_list = nvm_rq_to_ppa_list(rqd);
 	for (i = 0; i < rqd->nr_ppas; ) {
@@ -423,7 +411,7 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
 
 	pblk_down_chunk(pblk, ppa_list[0]);
 
-	ret = pblk_submit_io(pblk, rqd);
+	ret = pblk_submit_io(pblk, rqd, data);
 	if (ret) {
 		pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
 		goto fail_rollback;
@@ -437,8 +425,6 @@ fail_rollback:
 	pblk_dealloc_page(pblk, meta_line, rq_ppas);
 	list_add(&meta_line->list, &meta_line->list);
 	spin_unlock(&l_mg->close_lock);
-fail_free_bio:
-	bio_put(bio);
 fail_free_rqd:
 	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
 	return ret;
@@ -523,7 +509,7 @@ static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
 	meta_line = pblk_should_submit_meta_io(pblk, rqd);
 
 	/* Submit data write for current data line */
-	err = pblk_submit_io(pblk, rqd);
+	err = pblk_submit_io(pblk, rqd, NULL);
 	if (err) {
 		pblk_err(pblk, "data I/O submission failed: %d\n", err);
 		return NVM_IO_ERR;
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -783,14 +783,10 @@ struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
 					      struct ppa_addr ppa);
 void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd);
 void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd);
-int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd);
-int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd);
-int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd);
+int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd, void *buf);
+int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd, void *buf);
 int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line);
 void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd);
-struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
-			      unsigned int nr_secs, unsigned int len,
-			      int alloc_type, gfp_t gfp_mask);
 struct pblk_line *pblk_line_get(struct pblk *pblk);
 struct pblk_line *pblk_line_get_first_data(struct pblk *pblk);
 struct pblk_line *pblk_line_replace_data(struct pblk *pblk);
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -667,11 +667,14 @@ static struct request *nvme_nvm_alloc_request(struct request_queue *q,
 	return rq;
 }
 
-static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
+static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd,
+			      void *buf)
 {
+	struct nvm_geo *geo = &dev->geo;
 	struct request_queue *q = dev->q;
 	struct nvme_nvm_command *cmd;
 	struct request *rq;
+	int ret;
 
 	cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
 	if (!cmd)
@@ -679,8 +682,15 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 
 	rq = nvme_nvm_alloc_request(q, rqd, cmd);
 	if (IS_ERR(rq)) {
-		kfree(cmd);
-		return PTR_ERR(rq);
+		ret = PTR_ERR(rq);
+		goto err_free_cmd;
+	}
+
+	if (buf) {
+		ret = blk_rq_map_kern(q, rq, buf, geo->csecs * rqd->nr_ppas,
+				GFP_KERNEL);
+		if (ret)
+			goto err_free_cmd;
 	}
 
 	rq->end_io_data = rqd;
@@ -688,6 +698,10 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);
 
 	return 0;
+
+err_free_cmd:
+	kfree(cmd);
+	return ret;
 }
 
 static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name,
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -88,7 +88,7 @@ typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
 typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
 typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int,
 							struct nvm_chk_meta *);
-typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
+typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *, void *);
 typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *, int);
 typedef void (nvm_destroy_dma_pool_fn)(void *);
 typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
@@ -680,8 +680,8 @@ extern int nvm_get_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr,
 			      int, struct nvm_chk_meta *);
 extern int nvm_set_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr *,
 			      int, int);
-extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
-extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *);
+extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *, void *);
+extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *, void *);
 extern void nvm_end_io(struct nvm_rq *);
 
 #else /* CONFIG_NVM */