fs: rename buffer trylock

Like the page lock change, this also requires a name change, so convert the
raw test_and_set bitop to a trylock.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
	parent: 529ae9aaa0
	commit: ca5de404ff
				| @ -1720,7 +1720,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page, | ||||
| 		 */ | ||||
| 		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) { | ||||
| 			lock_buffer(bh); | ||||
| 		} else if (test_set_buffer_locked(bh)) { | ||||
| 		} else if (!trylock_buffer(bh)) { | ||||
| 			redirty_page_for_writepage(wbc, page); | ||||
| 			continue; | ||||
| 		} | ||||
| @ -3000,7 +3000,7 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[]) | ||||
| 
 | ||||
| 		if (rw == SWRITE || rw == SWRITE_SYNC) | ||||
| 			lock_buffer(bh); | ||||
| 		else if (test_set_buffer_locked(bh)) | ||||
| 		else if (!trylock_buffer(bh)) | ||||
| 			continue; | ||||
| 
 | ||||
| 		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) { | ||||
|  | ||||
| @ -221,7 +221,7 @@ write_out_data: | ||||
| 		 * blocking lock_buffer(). | ||||
| 		 */ | ||||
| 		if (buffer_dirty(bh)) { | ||||
| 			if (test_set_buffer_locked(bh)) { | ||||
| 			if (!trylock_buffer(bh)) { | ||||
| 				BUFFER_TRACE(bh, "needs blocking lock"); | ||||
| 				spin_unlock(&journal->j_list_lock); | ||||
| 				/* Write out all data to prevent deadlocks */ | ||||
|  | ||||
| @ -1194,7 +1194,7 @@ lock_retry_remap: | ||||
| 		tbh = bhs[i]; | ||||
| 		if (!tbh) | ||||
| 			continue; | ||||
| 		if (unlikely(test_set_buffer_locked(tbh))) | ||||
| 		if (!trylock_buffer(tbh)) | ||||
| 			BUG(); | ||||
| 		/* The buffer dirty state is now irrelevant, just clean it. */ | ||||
| 		clear_buffer_dirty(tbh); | ||||
|  | ||||
| @ -665,7 +665,7 @@ lock_retry_remap: | ||||
| 	for (i = 0; i < nr_bhs; i++) { | ||||
| 		struct buffer_head *tbh = bhs[i]; | ||||
| 
 | ||||
| 		if (unlikely(test_set_buffer_locked(tbh))) | ||||
| 		if (!trylock_buffer(tbh)) | ||||
| 			continue; | ||||
| 		if (unlikely(buffer_uptodate(tbh))) { | ||||
| 			unlock_buffer(tbh); | ||||
|  | ||||
| @ -586,7 +586,7 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no, | ||||
| 		for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) { | ||||
| 			struct buffer_head *tbh = bhs[i_bhs]; | ||||
| 
 | ||||
| 			if (unlikely(test_set_buffer_locked(tbh))) | ||||
| 			if (!trylock_buffer(tbh)) | ||||
| 				BUG(); | ||||
| 			BUG_ON(!buffer_uptodate(tbh)); | ||||
| 			clear_buffer_dirty(tbh); | ||||
| @ -779,7 +779,7 @@ int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync) | ||||
| 	for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) { | ||||
| 		struct buffer_head *tbh = bhs[i_bhs]; | ||||
| 
 | ||||
| 		if (unlikely(test_set_buffer_locked(tbh))) | ||||
| 		if (!trylock_buffer(tbh)) | ||||
| 			BUG(); | ||||
| 		BUG_ON(!buffer_uptodate(tbh)); | ||||
| 		clear_buffer_dirty(tbh); | ||||
|  | ||||
| @ -2435,7 +2435,7 @@ static int reiserfs_write_full_page(struct page *page, | ||||
| 		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) { | ||||
| 			lock_buffer(bh); | ||||
| 		} else { | ||||
| 			if (test_set_buffer_locked(bh)) { | ||||
| 			if (!trylock_buffer(bh)) { | ||||
| 				redirty_page_for_writepage(wbc, page); | ||||
| 				continue; | ||||
| 			} | ||||
|  | ||||
| @ -855,7 +855,7 @@ static int write_ordered_buffers(spinlock_t * lock, | ||||
| 		jh = JH_ENTRY(list->next); | ||||
| 		bh = jh->bh; | ||||
| 		get_bh(bh); | ||||
| 		if (test_set_buffer_locked(bh)) { | ||||
| 		if (!trylock_buffer(bh)) { | ||||
| 			if (!buffer_dirty(bh)) { | ||||
| 				list_move(&jh->list, &tmp); | ||||
| 				goto loop_next; | ||||
| @ -3871,7 +3871,7 @@ int reiserfs_prepare_for_journal(struct super_block *p_s_sb, | ||||
| { | ||||
| 	PROC_INFO_INC(p_s_sb, journal.prepare); | ||||
| 
 | ||||
| 	if (test_set_buffer_locked(bh)) { | ||||
| 	if (!trylock_buffer(bh)) { | ||||
| 		if (!wait) | ||||
| 			return 0; | ||||
| 		lock_buffer(bh); | ||||
|  | ||||
| @ -1104,7 +1104,7 @@ xfs_page_state_convert( | ||||
| 			 * that we are writing into for the first time. | ||||
| 			 */ | ||||
| 			type = IOMAP_NEW; | ||||
| 			if (!test_and_set_bit(BH_Lock, &bh->b_state)) { | ||||
| 			if (trylock_buffer(bh)) { | ||||
| 				ASSERT(buffer_mapped(bh)); | ||||
| 				if (iomap_valid) | ||||
| 					all_bh = 1; | ||||
|  | ||||
| @ -115,7 +115,6 @@ BUFFER_FNS(Uptodate, uptodate) | ||||
| BUFFER_FNS(Dirty, dirty) | ||||
| TAS_BUFFER_FNS(Dirty, dirty) | ||||
| BUFFER_FNS(Lock, locked) | ||||
| TAS_BUFFER_FNS(Lock, locked) | ||||
| BUFFER_FNS(Req, req) | ||||
| TAS_BUFFER_FNS(Req, req) | ||||
| BUFFER_FNS(Mapped, mapped) | ||||
| @ -321,10 +320,15 @@ static inline void wait_on_buffer(struct buffer_head *bh) | ||||
| 		__wait_on_buffer(bh); | ||||
| } | ||||
| 
 | ||||
| static inline int trylock_buffer(struct buffer_head *bh) | ||||
| { | ||||
| 	return likely(!test_and_set_bit(BH_Lock, &bh->b_state)); | ||||
| } | ||||
| 
 | ||||
| static inline void lock_buffer(struct buffer_head *bh) | ||||
| { | ||||
| 	might_sleep(); | ||||
| 	if (test_set_buffer_locked(bh)) | ||||
| 	if (!trylock_buffer(bh)) | ||||
| 		__lock_buffer(bh); | ||||
| } | ||||
| 
 | ||||
|  | ||||