2015-07-08 09:59:36 +00:00
|
|
|
/*
|
|
|
|
* f2fs extent cache support
|
|
|
|
*
|
|
|
|
* Copyright (c) 2015 Motorola Mobility
|
|
|
|
* Copyright (c) 2015 Samsung Electronics
|
|
|
|
* Authors: Jaegeuk Kim <jaegeuk@kernel.org>
|
|
|
|
* Chao Yu <chao2.yu@samsung.com>
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/fs.h>
|
|
|
|
#include <linux/f2fs_fs.h>
|
|
|
|
|
|
|
|
#include "f2fs.h"
|
|
|
|
#include "node.h"
|
|
|
|
#include <trace/events/f2fs.h>
|
|
|
|
|
2017-04-11 01:25:22 +00:00
|
|
|
static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
|
|
|
|
unsigned int ofs)
|
|
|
|
{
|
|
|
|
if (cached_re) {
|
|
|
|
if (cached_re->ofs <= ofs &&
|
|
|
|
cached_re->ofs + cached_re->len > ofs) {
|
|
|
|
return cached_re;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct rb_entry *__lookup_rb_tree_slow(struct rb_root *root,
|
|
|
|
unsigned int ofs)
|
|
|
|
{
|
|
|
|
struct rb_node *node = root->rb_node;
|
|
|
|
struct rb_entry *re;
|
|
|
|
|
|
|
|
while (node) {
|
|
|
|
re = rb_entry(node, struct rb_entry, rb_node);
|
|
|
|
|
|
|
|
if (ofs < re->ofs)
|
|
|
|
node = node->rb_left;
|
|
|
|
else if (ofs >= re->ofs + re->len)
|
|
|
|
node = node->rb_right;
|
|
|
|
else
|
|
|
|
return re;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2017-04-14 15:24:55 +00:00
|
|
|
/*
 * Look up the rb entry covering @ofs, trying the cached entry first and
 * falling back to a full tree walk on a cache miss.
 */
struct rb_entry *__lookup_rb_tree(struct rb_root *root,
				struct rb_entry *cached_re, unsigned int ofs)
{
	struct rb_entry *re = __lookup_rb_tree_fast(cached_re, ofs);

	return re ? re : __lookup_rb_tree_slow(root, ofs);
}
|
|
|
|
|
2017-04-14 15:24:55 +00:00
|
|
|
struct rb_node **__lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
|
2017-04-11 01:25:22 +00:00
|
|
|
struct rb_root *root, struct rb_node **parent,
|
|
|
|
unsigned int ofs)
|
|
|
|
{
|
|
|
|
struct rb_node **p = &root->rb_node;
|
|
|
|
struct rb_entry *re;
|
|
|
|
|
|
|
|
while (*p) {
|
|
|
|
*parent = *p;
|
|
|
|
re = rb_entry(*parent, struct rb_entry, rb_node);
|
|
|
|
|
|
|
|
if (ofs < re->ofs)
|
|
|
|
p = &(*p)->rb_left;
|
|
|
|
else if (ofs >= re->ofs + re->len)
|
|
|
|
p = &(*p)->rb_right;
|
|
|
|
else
|
|
|
|
f2fs_bug_on(sbi, 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
return p;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* lookup rb entry in position of @ofs in rb-tree,
|
|
|
|
* if hit, return the entry, otherwise, return NULL
|
|
|
|
* @prev_ex: extent before ofs
|
|
|
|
* @next_ex: extent after ofs
|
|
|
|
* @insert_p: insert point for new extent at ofs
|
|
|
|
 * in order to simplify the insertion after.
|
|
|
|
* tree must stay unchanged between lookup and insertion.
|
|
|
|
*/
|
2017-04-14 15:24:55 +00:00
|
|
|
struct rb_entry *__lookup_rb_tree_ret(struct rb_root *root,
|
2017-04-11 01:25:22 +00:00
|
|
|
struct rb_entry *cached_re,
|
|
|
|
unsigned int ofs,
|
|
|
|
struct rb_entry **prev_entry,
|
|
|
|
struct rb_entry **next_entry,
|
|
|
|
struct rb_node ***insert_p,
|
2017-04-14 15:24:55 +00:00
|
|
|
struct rb_node **insert_parent,
|
|
|
|
bool force)
|
2017-04-11 01:25:22 +00:00
|
|
|
{
|
|
|
|
struct rb_node **pnode = &root->rb_node;
|
|
|
|
struct rb_node *parent = NULL, *tmp_node;
|
|
|
|
struct rb_entry *re = cached_re;
|
|
|
|
|
|
|
|
*insert_p = NULL;
|
|
|
|
*insert_parent = NULL;
|
|
|
|
*prev_entry = NULL;
|
|
|
|
*next_entry = NULL;
|
|
|
|
|
|
|
|
if (RB_EMPTY_ROOT(root))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
if (re) {
|
|
|
|
if (re->ofs <= ofs && re->ofs + re->len > ofs)
|
|
|
|
goto lookup_neighbors;
|
|
|
|
}
|
|
|
|
|
|
|
|
while (*pnode) {
|
|
|
|
parent = *pnode;
|
|
|
|
re = rb_entry(*pnode, struct rb_entry, rb_node);
|
|
|
|
|
|
|
|
if (ofs < re->ofs)
|
|
|
|
pnode = &(*pnode)->rb_left;
|
|
|
|
else if (ofs >= re->ofs + re->len)
|
|
|
|
pnode = &(*pnode)->rb_right;
|
|
|
|
else
|
|
|
|
goto lookup_neighbors;
|
|
|
|
}
|
|
|
|
|
|
|
|
*insert_p = pnode;
|
|
|
|
*insert_parent = parent;
|
|
|
|
|
|
|
|
re = rb_entry(parent, struct rb_entry, rb_node);
|
|
|
|
tmp_node = parent;
|
|
|
|
if (parent && ofs > re->ofs)
|
|
|
|
tmp_node = rb_next(parent);
|
|
|
|
*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
|
|
|
|
|
|
|
|
tmp_node = parent;
|
|
|
|
if (parent && ofs < re->ofs)
|
|
|
|
tmp_node = rb_prev(parent);
|
|
|
|
*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
lookup_neighbors:
|
2017-04-14 15:24:55 +00:00
|
|
|
if (ofs == re->ofs || force) {
|
2017-04-11 01:25:22 +00:00
|
|
|
/* lookup prev node for merging backward later */
|
|
|
|
tmp_node = rb_prev(&re->rb_node);
|
|
|
|
*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
|
|
|
|
}
|
2017-04-14 15:24:55 +00:00
|
|
|
if (ofs == re->ofs + re->len - 1 || force) {
|
2017-04-11 01:25:22 +00:00
|
|
|
/* lookup next node for merging frontward later */
|
|
|
|
tmp_node = rb_next(&re->rb_node);
|
|
|
|
*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
|
|
|
|
}
|
|
|
|
return re;
|
|
|
|
}
|
|
|
|
|
2017-04-17 10:21:43 +00:00
|
|
|
bool __check_rb_tree_consistence(struct f2fs_sb_info *sbi,
|
|
|
|
struct rb_root *root)
|
|
|
|
{
|
|
|
|
#ifdef CONFIG_F2FS_CHECK_FS
|
|
|
|
struct rb_node *cur = rb_first(root), *next;
|
|
|
|
struct rb_entry *cur_re, *next_re;
|
|
|
|
|
|
|
|
if (!cur)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
while (cur) {
|
|
|
|
next = rb_next(cur);
|
|
|
|
if (!next)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
cur_re = rb_entry(cur, struct rb_entry, rb_node);
|
|
|
|
next_re = rb_entry(next, struct rb_entry, rb_node);
|
|
|
|
|
|
|
|
if (cur_re->ofs + cur_re->len > next_re->ofs) {
|
|
|
|
f2fs_msg(sbi->sb, KERN_INFO, "inconsistent rbtree, "
|
|
|
|
"cur(%u, %u) next(%u, %u)",
|
|
|
|
cur_re->ofs, cur_re->len,
|
|
|
|
next_re->ofs, next_re->len);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
cur = next;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2015-07-08 09:59:36 +00:00
|
|
|
/* slab caches for extent_tree and extent_node objects */
static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;
|
|
|
|
|
|
|
|
static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
|
|
|
|
struct extent_tree *et, struct extent_info *ei,
|
|
|
|
struct rb_node *parent, struct rb_node **p)
|
|
|
|
{
|
|
|
|
struct extent_node *en;
|
|
|
|
|
|
|
|
en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
|
|
|
|
if (!en)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
en->ei = *ei;
|
|
|
|
INIT_LIST_HEAD(&en->list);
|
2016-01-26 12:56:26 +00:00
|
|
|
en->et = et;
|
2015-07-08 09:59:36 +00:00
|
|
|
|
|
|
|
rb_link_node(&en->rb_node, parent, p);
|
|
|
|
rb_insert_color(&en->rb_node, &et->root);
|
2016-01-08 12:22:52 +00:00
|
|
|
atomic_inc(&et->node_cnt);
|
2015-07-08 09:59:36 +00:00
|
|
|
atomic_inc(&sbi->total_ext_node);
|
|
|
|
return en;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void __detach_extent_node(struct f2fs_sb_info *sbi,
|
|
|
|
struct extent_tree *et, struct extent_node *en)
|
|
|
|
{
|
|
|
|
rb_erase(&en->rb_node, &et->root);
|
2016-01-08 12:22:52 +00:00
|
|
|
atomic_dec(&et->node_cnt);
|
2015-07-08 09:59:36 +00:00
|
|
|
atomic_dec(&sbi->total_ext_node);
|
|
|
|
|
|
|
|
if (et->cached_en == en)
|
|
|
|
et->cached_en = NULL;
|
2016-01-26 12:56:25 +00:00
|
|
|
kmem_cache_free(extent_node_slab, en);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Flow to release an extent_node:
|
|
|
|
* 1. list_del_init
|
|
|
|
* 2. __detach_extent_node
|
|
|
|
* 3. kmem_cache_free.
|
|
|
|
*/
|
|
|
|
static void __release_extent_node(struct f2fs_sb_info *sbi,
|
|
|
|
struct extent_tree *et, struct extent_node *en)
|
|
|
|
{
|
|
|
|
spin_lock(&sbi->extent_lock);
|
2016-01-26 12:56:26 +00:00
|
|
|
f2fs_bug_on(sbi, list_empty(&en->list));
|
|
|
|
list_del_init(&en->list);
|
2016-01-26 12:56:25 +00:00
|
|
|
spin_unlock(&sbi->extent_lock);
|
|
|
|
|
|
|
|
__detach_extent_node(sbi, et, en);
|
2015-07-08 09:59:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static struct extent_tree *__grab_extent_tree(struct inode *inode)
|
|
|
|
{
|
|
|
|
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
|
|
|
|
struct extent_tree *et;
|
|
|
|
nid_t ino = inode->i_ino;
|
|
|
|
|
2017-02-23 11:39:59 +00:00
|
|
|
mutex_lock(&sbi->extent_tree_lock);
|
2015-07-08 09:59:36 +00:00
|
|
|
et = radix_tree_lookup(&sbi->extent_tree_root, ino);
|
|
|
|
if (!et) {
|
|
|
|
et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
|
|
|
|
f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
|
|
|
|
memset(et, 0, sizeof(struct extent_tree));
|
|
|
|
et->ino = ino;
|
|
|
|
et->root = RB_ROOT;
|
|
|
|
et->cached_en = NULL;
|
|
|
|
rwlock_init(&et->lock);
|
2015-12-31 23:02:16 +00:00
|
|
|
INIT_LIST_HEAD(&et->list);
|
2016-01-08 12:22:52 +00:00
|
|
|
atomic_set(&et->node_cnt, 0);
|
2015-12-22 03:20:15 +00:00
|
|
|
atomic_inc(&sbi->total_ext_tree);
|
2015-12-22 03:25:50 +00:00
|
|
|
} else {
|
|
|
|
atomic_dec(&sbi->total_zombie_tree);
|
2015-12-31 23:02:16 +00:00
|
|
|
list_del_init(&et->list);
|
2015-07-08 09:59:36 +00:00
|
|
|
}
|
2017-02-23 11:39:59 +00:00
|
|
|
mutex_unlock(&sbi->extent_tree_lock);
|
2015-07-08 09:59:36 +00:00
|
|
|
|
|
|
|
/* never died until evict_inode */
|
|
|
|
F2FS_I(inode)->extent_tree = et;
|
|
|
|
|
|
|
|
return et;
|
|
|
|
}
|
|
|
|
|
2015-08-19 11:14:15 +00:00
|
|
|
static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
|
|
|
|
struct extent_tree *et, struct extent_info *ei)
|
2015-07-08 09:59:36 +00:00
|
|
|
{
|
|
|
|
struct rb_node **p = &et->root.rb_node;
|
|
|
|
struct extent_node *en;
|
|
|
|
|
2015-08-19 11:14:15 +00:00
|
|
|
en = __attach_extent_node(sbi, et, ei, NULL, p);
|
2015-07-08 09:59:36 +00:00
|
|
|
if (!en)
|
|
|
|
return NULL;
|
2015-08-19 11:14:15 +00:00
|
|
|
|
|
|
|
et->largest = en->ei;
|
2015-07-08 09:59:36 +00:00
|
|
|
et->cached_en = en;
|
|
|
|
return en;
|
|
|
|
}
|
|
|
|
|
|
|
|
static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
|
2016-01-26 12:56:26 +00:00
|
|
|
struct extent_tree *et)
|
2015-07-08 09:59:36 +00:00
|
|
|
{
|
|
|
|
struct rb_node *node, *next;
|
|
|
|
struct extent_node *en;
|
2016-01-08 12:22:52 +00:00
|
|
|
unsigned int count = atomic_read(&et->node_cnt);
|
2015-07-08 09:59:36 +00:00
|
|
|
|
|
|
|
node = rb_first(&et->root);
|
|
|
|
while (node) {
|
|
|
|
next = rb_next(node);
|
|
|
|
en = rb_entry(node, struct extent_node, rb_node);
|
2016-01-26 12:56:26 +00:00
|
|
|
__release_extent_node(sbi, et, en);
|
2015-07-08 09:59:36 +00:00
|
|
|
node = next;
|
|
|
|
}
|
|
|
|
|
2016-01-08 12:22:52 +00:00
|
|
|
return count - atomic_read(&et->node_cnt);
|
2015-07-08 09:59:36 +00:00
|
|
|
}
|
|
|
|
|
2015-09-17 10:24:17 +00:00
|
|
|
static void __drop_largest_extent(struct inode *inode,
|
|
|
|
pgoff_t fofs, unsigned int len)
|
2015-07-08 09:59:36 +00:00
|
|
|
{
|
|
|
|
struct extent_info *largest = &F2FS_I(inode)->extent_tree->largest;
|
|
|
|
|
2016-05-20 16:52:20 +00:00
|
|
|
if (fofs < largest->fofs + largest->len && fofs + len > largest->fofs) {
|
2015-07-08 09:59:36 +00:00
|
|
|
largest->len = 0;
|
2016-10-14 18:51:23 +00:00
|
|
|
f2fs_mark_inode_dirty_sync(inode, true);
|
2016-05-20 16:52:20 +00:00
|
|
|
}
|
2015-07-08 09:59:36 +00:00
|
|
|
}
|
|
|
|
|
2015-12-28 19:39:06 +00:00
|
|
|
/* return true, if inode page is changed */
|
2017-05-19 07:06:12 +00:00
|
|
|
static bool __f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
|
2015-07-08 09:59:36 +00:00
|
|
|
{
|
|
|
|
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
|
|
|
|
struct extent_tree *et;
|
|
|
|
struct extent_node *en;
|
|
|
|
struct extent_info ei;
|
|
|
|
|
2015-12-28 19:39:06 +00:00
|
|
|
if (!f2fs_may_extent_tree(inode)) {
|
|
|
|
/* drop largest extent */
|
|
|
|
if (i_ext && i_ext->len) {
|
|
|
|
i_ext->len = 0;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
2015-07-08 09:59:36 +00:00
|
|
|
|
|
|
|
et = __grab_extent_tree(inode);
|
|
|
|
|
2015-12-28 19:39:06 +00:00
|
|
|
if (!i_ext || !i_ext->len)
|
|
|
|
return false;
|
2015-07-08 09:59:36 +00:00
|
|
|
|
2016-05-04 15:19:47 +00:00
|
|
|
get_extent_info(&ei, i_ext);
|
2015-07-08 09:59:36 +00:00
|
|
|
|
|
|
|
write_lock(&et->lock);
|
2016-01-08 12:22:52 +00:00
|
|
|
if (atomic_read(&et->node_cnt))
|
2015-07-08 09:59:36 +00:00
|
|
|
goto out;
|
|
|
|
|
2015-08-19 11:14:15 +00:00
|
|
|
en = __init_extent_tree(sbi, et, &ei);
|
2015-07-08 09:59:36 +00:00
|
|
|
if (en) {
|
|
|
|
spin_lock(&sbi->extent_lock);
|
|
|
|
list_add_tail(&en->list, &sbi->extent_list);
|
|
|
|
spin_unlock(&sbi->extent_lock);
|
|
|
|
}
|
|
|
|
out:
|
|
|
|
write_unlock(&et->lock);
|
2015-12-28 19:39:06 +00:00
|
|
|
return false;
|
2015-07-08 09:59:36 +00:00
|
|
|
}
|
|
|
|
|
2017-05-19 07:06:12 +00:00
|
|
|
bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
|
|
|
|
{
|
|
|
|
bool ret = __f2fs_init_extent_tree(inode, i_ext);
|
|
|
|
|
|
|
|
if (!F2FS_I(inode)->extent_tree)
|
|
|
|
set_inode_flag(inode, FI_NO_EXTENT);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2015-07-08 09:59:36 +00:00
|
|
|
/*
 * Look up @pgofs in the inode's extent cache, filling *@ei on a hit.
 * Checks the cached largest extent first, then the rb-tree (via the
 * cached node fast path).  A tree hit promotes the node on the global
 * LRU list.  Returns true on a hit, false otherwise.
 */
static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
				struct extent_info *ei)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	struct extent_node *en;
	bool ret = false;

	f2fs_bug_on(sbi, !et);

	trace_f2fs_lookup_extent_tree_start(inode, pgofs);

	read_lock(&et->lock);

	/* fastest path: the cached largest extent covers @pgofs */
	if (et->largest.fofs <= pgofs &&
			et->largest.fofs + et->largest.len > pgofs) {
		*ei = et->largest;
		ret = true;
		stat_inc_largest_node_hit(sbi);
		goto out;
	}

	en = (struct extent_node *)__lookup_rb_tree(&et->root,
				(struct rb_entry *)et->cached_en, pgofs);
	if (!en)
		goto out;

	if (en == et->cached_en)
		stat_inc_cached_node_hit(sbi);
	else
		stat_inc_rbtree_node_hit(sbi);

	*ei = en->ei;
	spin_lock(&sbi->extent_lock);
	if (!list_empty(&en->list)) {
		/* promote on the LRU list and refresh the lookup cache */
		list_move_tail(&en->list, &sbi->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&sbi->extent_lock);
	ret = true;
out:
	stat_inc_total_hit(sbi);
	read_unlock(&et->lock);

	trace_f2fs_lookup_extent_tree_end(inode, pgofs, ei);
	return ret;
}
|
|
|
|
|
2016-05-20 16:52:20 +00:00
|
|
|
static struct extent_node *__try_merge_extent_node(struct inode *inode,
|
2015-07-15 10:05:17 +00:00
|
|
|
struct extent_tree *et, struct extent_info *ei,
|
|
|
|
struct extent_node *prev_ex,
|
2015-08-19 11:15:09 +00:00
|
|
|
struct extent_node *next_ex)
|
2015-07-15 10:05:17 +00:00
|
|
|
{
|
2016-05-20 16:52:20 +00:00
|
|
|
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
|
2015-07-15 10:05:17 +00:00
|
|
|
struct extent_node *en = NULL;
|
|
|
|
|
|
|
|
if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
|
|
|
|
prev_ex->ei.len += ei->len;
|
|
|
|
ei = &prev_ex->ei;
|
|
|
|
en = prev_ex;
|
|
|
|
}
|
2015-08-19 11:15:09 +00:00
|
|
|
|
2015-07-15 10:05:17 +00:00
|
|
|
if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
|
|
|
|
next_ex->ei.fofs = ei->fofs;
|
|
|
|
next_ex->ei.blk = ei->blk;
|
|
|
|
next_ex->ei.len += ei->len;
|
2016-12-19 12:10:48 +00:00
|
|
|
if (en)
|
|
|
|
__release_extent_node(sbi, et, prev_ex);
|
|
|
|
|
2015-07-15 10:05:17 +00:00
|
|
|
en = next_ex;
|
|
|
|
}
|
2015-08-19 11:15:09 +00:00
|
|
|
|
2016-01-26 17:12:50 +00:00
|
|
|
if (!en)
|
|
|
|
return NULL;
|
|
|
|
|
2016-05-20 16:52:20 +00:00
|
|
|
__try_update_largest_extent(inode, et, en);
|
2016-01-26 17:12:50 +00:00
|
|
|
|
|
|
|
spin_lock(&sbi->extent_lock);
|
2016-01-26 17:20:05 +00:00
|
|
|
if (!list_empty(&en->list)) {
|
2016-01-26 17:12:50 +00:00
|
|
|
list_move_tail(&en->list, &sbi->extent_list);
|
2016-01-26 17:20:05 +00:00
|
|
|
et->cached_en = en;
|
|
|
|
}
|
2016-01-26 17:12:50 +00:00
|
|
|
spin_unlock(&sbi->extent_lock);
|
2015-08-19 11:15:09 +00:00
|
|
|
return en;
|
|
|
|
}
|
|
|
|
|
2016-05-20 16:52:20 +00:00
|
|
|
static struct extent_node *__insert_extent_tree(struct inode *inode,
|
2015-08-19 11:15:09 +00:00
|
|
|
struct extent_tree *et, struct extent_info *ei,
|
|
|
|
struct rb_node **insert_p,
|
|
|
|
struct rb_node *insert_parent)
|
|
|
|
{
|
2016-05-20 16:52:20 +00:00
|
|
|
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
|
2018-02-21 18:13:40 +00:00
|
|
|
struct rb_node **p;
|
2015-08-19 11:15:09 +00:00
|
|
|
struct rb_node *parent = NULL;
|
|
|
|
struct extent_node *en = NULL;
|
2015-07-15 10:05:17 +00:00
|
|
|
|
|
|
|
if (insert_p && insert_parent) {
|
|
|
|
parent = insert_parent;
|
|
|
|
p = insert_p;
|
|
|
|
goto do_insert;
|
|
|
|
}
|
|
|
|
|
2017-04-11 01:25:22 +00:00
|
|
|
p = __lookup_rb_tree_for_insert(sbi, &et->root, &parent, ei->fofs);
|
2015-07-15 10:05:17 +00:00
|
|
|
do_insert:
|
|
|
|
en = __attach_extent_node(sbi, et, ei, parent, p);
|
|
|
|
if (!en)
|
|
|
|
return NULL;
|
2015-08-19 11:15:09 +00:00
|
|
|
|
2016-05-20 16:52:20 +00:00
|
|
|
__try_update_largest_extent(inode, et, en);
|
2016-01-26 17:12:50 +00:00
|
|
|
|
|
|
|
/* update in global extent list */
|
|
|
|
spin_lock(&sbi->extent_lock);
|
|
|
|
list_add_tail(&en->list, &sbi->extent_list);
|
2016-01-26 17:20:05 +00:00
|
|
|
et->cached_en = en;
|
2016-01-26 17:12:50 +00:00
|
|
|
spin_unlock(&sbi->extent_lock);
|
2015-07-15 10:05:17 +00:00
|
|
|
return en;
|
|
|
|
}
|
|
|
|
|
2017-02-25 09:29:54 +00:00
|
|
|
static void f2fs_update_extent_tree_range(struct inode *inode,
|
f2fs: update extent tree in batches
This patch introduce a new helper f2fs_update_extent_tree_range which can
do extent mapping update at a specified range.
The main idea is:
1) punch all mapping info in extent node(s) which are at a specified range;
2) try to merge new extent mapping with adjacent node, or failing that,
insert the mapping into extent tree as a new node.
In order to see the benefit, I add a function for stating time stamping
count as below:
uint64_t rdtsc(void)
{
uint32_t lo, hi;
__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
return (uint64_t)hi << 32 | lo;
}
My test environment is: ubuntu, intel i7-3770, 16G memory, 256g micron ssd.
truncation path: update extent cache from truncate_data_blocks_range
non-truncataion path: update extent cache from other paths
total: all update paths
a) Removing 128MB file which has one extent node mapping whole range of
file:
1. dd if=/dev/zero of=/mnt/f2fs/128M bs=1M count=128
2. sync
3. rm /mnt/f2fs/128M
Before:
total count average
truncation: 7651022 32768 233.49
Patched:
total count average
truncation: 3321 33 100.64
b) fsstress:
fsstress -d /mnt/f2fs -l 5 -n 100 -p 20
Test times: 5 times.
Before:
total count average
truncation: 5812480.6 20911.6 277.95
non-truncation: 7783845.6 13440.8 579.12
total: 13596326.2 34352.4 395.79
Patched:
total count average
truncation: 1281283.0 3041.6 421.25
non-truncation: 7355844.4 13662.8 538.38
total: 8637127.4 16704.4 517.06
1) For the updates in truncation path:
- we can see updating in batches leads total tsc and update count reducing
explicitly;
- besides, for a single batched updating, punching multiple extent nodes
in a loop, result in executing more operations, so our average tsc
increase intensively.
2) For the updates in non-truncation path:
- there is a little improvement, that is because for the scenario that we
just need to update in the head or tail of extent node, new interface
optimize to update info in extent node directly, rather than removing
original extent node for updating and then inserting that updated one
into cache as new node.
Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2015-08-26 12:34:48 +00:00
|
|
|
pgoff_t fofs, block_t blkaddr, unsigned int len)
|
2015-07-08 09:59:36 +00:00
|
|
|
{
|
|
|
|
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
|
|
|
|
struct extent_tree *et = F2FS_I(inode)->extent_tree;
|
2015-09-17 10:42:06 +00:00
|
|
|
struct extent_node *en = NULL, *en1 = NULL;
|
f2fs: update extent tree in batches
This patch introduce a new helper f2fs_update_extent_tree_range which can
do extent mapping update at a specified range.
The main idea is:
1) punch all mapping info in extent node(s) which are at a specified range;
2) try to merge new extent mapping with adjacent node, or failing that,
insert the mapping into extent tree as a new node.
In order to see the benefit, I add a function for stating time stamping
count as below:
uint64_t rdtsc(void)
{
uint32_t lo, hi;
__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
return (uint64_t)hi << 32 | lo;
}
My test environment is: ubuntu, intel i7-3770, 16G memory, 256g micron ssd.
truncation path: update extent cache from truncate_data_blocks_range
non-truncataion path: update extent cache from other paths
total: all update paths
a) Removing 128MB file which has one extent node mapping whole range of
file:
1. dd if=/dev/zero of=/mnt/f2fs/128M bs=1M count=128
2. sync
3. rm /mnt/f2fs/128M
Before:
total count average
truncation: 7651022 32768 233.49
Patched:
total count average
truncation: 3321 33 100.64
b) fsstress:
fsstress -d /mnt/f2fs -l 5 -n 100 -p 20
Test times: 5 times.
Before:
total count average
truncation: 5812480.6 20911.6 277.95
non-truncation: 7783845.6 13440.8 579.12
total: 13596326.2 34352.4 395.79
Patched:
total count average
truncation: 1281283.0 3041.6 421.25
non-truncation: 7355844.4 13662.8 538.38
total: 8637127.4 16704.4 517.06
1) For the updates in truncation path:
- we can see updating in batches leads total tsc and update count reducing
explicitly;
- besides, for a single batched updating, punching multiple extent nodes
in a loop, result in executing more operations, so our average tsc
increase intensively.
2) For the updates in non-truncation path:
- there is a little improvement, that is because for the scenario that we
just need to update in the head or tail of extent node, new interface
optimize to update info in extent node directly, rather than removing
original extent node for updating and then inserting that updated one
into cache as new node.
Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2015-08-26 12:34:48 +00:00
|
|
|
struct extent_node *prev_en = NULL, *next_en = NULL;
|
2015-07-08 09:59:36 +00:00
|
|
|
struct extent_info ei, dei, prev;
|
2015-07-15 10:05:17 +00:00
|
|
|
struct rb_node **insert_p = NULL, *insert_parent = NULL;
|
f2fs: update extent tree in batches
This patch introduce a new helper f2fs_update_extent_tree_range which can
do extent mapping update at a specified range.
The main idea is:
1) punch all mapping info in extent node(s) which are at a specified range;
2) try to merge new extent mapping with adjacent node, or failing that,
insert the mapping into extent tree as a new node.
In order to see the benefit, I add a function for stating time stamping
count as below:
uint64_t rdtsc(void)
{
uint32_t lo, hi;
__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
return (uint64_t)hi << 32 | lo;
}
My test environment is: ubuntu, intel i7-3770, 16G memory, 256g micron ssd.
truncation path: update extent cache from truncate_data_blocks_range
non-truncataion path: update extent cache from other paths
total: all update paths
a) Removing 128MB file which has one extent node mapping whole range of
file:
1. dd if=/dev/zero of=/mnt/f2fs/128M bs=1M count=128
2. sync
3. rm /mnt/f2fs/128M
Before:
total count average
truncation: 7651022 32768 233.49
Patched:
total count average
truncation: 3321 33 100.64
b) fsstress:
fsstress -d /mnt/f2fs -l 5 -n 100 -p 20
Test times: 5 times.
Before:
total count average
truncation: 5812480.6 20911.6 277.95
non-truncation: 7783845.6 13440.8 579.12
total: 13596326.2 34352.4 395.79
Patched:
total count average
truncation: 1281283.0 3041.6 421.25
non-truncation: 7355844.4 13662.8 538.38
total: 8637127.4 16704.4 517.06
1) For the updates in truncation path:
- we can see updating in batches leads total tsc and update count reducing
explicitly;
- besides, for a single batched updating, punching multiple extent nodes
in a loop, result in executing more operations, so our average tsc
increase intensively.
2) For the updates in non-truncation path:
- there is a little improvement, that is because for the scenario that we
just need to update in the head or tail of extent node, new interface
optimize to update info in extent node directly, rather than removing
original extent node for updating and then inserting that updated one
into cache as new node.
Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2015-08-26 12:34:48 +00:00
|
|
|
unsigned int end = fofs + len;
|
|
|
|
unsigned int pos = (unsigned int)fofs;
|
2015-07-08 09:59:36 +00:00
|
|
|
|
|
|
|
if (!et)
|
2017-02-25 09:29:54 +00:00
|
|
|
return;
|
2015-07-08 09:59:36 +00:00
|
|
|
|
2015-09-06 09:50:13 +00:00
|
|
|
trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, len);
|
|
|
|
|
2015-07-08 09:59:36 +00:00
|
|
|
write_lock(&et->lock);
|
|
|
|
|
2016-05-20 17:13:22 +00:00
|
|
|
if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
|
2015-07-08 09:59:36 +00:00
|
|
|
write_unlock(&et->lock);
|
2017-02-25 09:29:54 +00:00
|
|
|
return;
|
2015-07-08 09:59:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
prev = et->largest;
|
|
|
|
dei.len = 0;
|
|
|
|
|
2015-09-17 10:42:06 +00:00
|
|
|
/*
|
|
|
|
* drop largest extent before lookup, in case it's already
|
|
|
|
* been shrunk from extent tree
|
|
|
|
*/
|
2015-09-17 10:24:17 +00:00
|
|
|
__drop_largest_extent(inode, fofs, len);
|
2015-07-08 09:59:36 +00:00
|
|
|
|
f2fs: update extent tree in batches
This patch introduce a new helper f2fs_update_extent_tree_range which can
do extent mapping update at a specified range.
The main idea is:
1) punch all mapping info in extent node(s) which are at a specified range;
2) try to merge new extent mapping with adjacent node, or failing that,
insert the mapping into extent tree as a new node.
In order to see the benefit, I add a function for stating time stamping
count as below:
uint64_t rdtsc(void)
{
uint32_t lo, hi;
__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
return (uint64_t)hi << 32 | lo;
}
My test environment is: ubuntu, intel i7-3770, 16G memory, 256g micron ssd.
truncation path: update extent cache from truncate_data_blocks_range
non-truncataion path: update extent cache from other paths
total: all update paths
a) Removing 128MB file which has one extent node mapping whole range of
file:
1. dd if=/dev/zero of=/mnt/f2fs/128M bs=1M count=128
2. sync
3. rm /mnt/f2fs/128M
Before:
total count average
truncation: 7651022 32768 233.49
Patched:
total count average
truncation: 3321 33 100.64
b) fsstress:
fsstress -d /mnt/f2fs -l 5 -n 100 -p 20
Test times: 5 times.
Before:
total count average
truncation: 5812480.6 20911.6 277.95
non-truncation: 7783845.6 13440.8 579.12
total: 13596326.2 34352.4 395.79
Patched:
total count average
truncation: 1281283.0 3041.6 421.25
non-truncation: 7355844.4 13662.8 538.38
total: 8637127.4 16704.4 517.06
1) For the updates in truncation path:
- we can see updating in batches leads total tsc and update count reducing
explicitly;
- besides, for a single batched updating, punching multiple extent nodes
in a loop, result in executing more operations, so our average tsc
increase intensively.
2) For the updates in non-truncation path:
- there is a little improvement, that is because for the scenario that we
just need to update in the head or tail of extent node, new interface
optimize to update info in extent node directly, rather than removing
original extent node for updating and then inserting that updated one
into cache as new node.
Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2015-08-26 12:34:48 +00:00
|
|
|
/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
|
2017-04-11 01:25:22 +00:00
|
|
|
en = (struct extent_node *)__lookup_rb_tree_ret(&et->root,
|
|
|
|
(struct rb_entry *)et->cached_en, fofs,
|
|
|
|
(struct rb_entry **)&prev_en,
|
|
|
|
(struct rb_entry **)&next_en,
|
2017-04-14 15:24:55 +00:00
|
|
|
&insert_p, &insert_parent, false);
|
2015-09-17 10:42:06 +00:00
|
|
|
if (!en)
|
|
|
|
en = next_en;
|
f2fs: update extent tree in batches
This patch introduce a new helper f2fs_update_extent_tree_range which can
do extent mapping update at a specified range.
The main idea is:
1) punch all mapping info in extent node(s) which are at a specified range;
2) try to merge new extent mapping with adjacent node, or failing that,
insert the mapping into extent tree as a new node.
In order to see the benefit, I add a function for stating time stamping
count as below:
uint64_t rdtsc(void)
{
uint32_t lo, hi;
__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
return (uint64_t)hi << 32 | lo;
}
My test environment is: ubuntu, intel i7-3770, 16G memory, 256g micron ssd.
truncation path: update extent cache from truncate_data_blocks_range
non-truncataion path: update extent cache from other paths
total: all update paths
a) Removing 128MB file which has one extent node mapping whole range of
file:
1. dd if=/dev/zero of=/mnt/f2fs/128M bs=1M count=128
2. sync
3. rm /mnt/f2fs/128M
Before:
total count average
truncation: 7651022 32768 233.49
Patched:
total count average
truncation: 3321 33 100.64
b) fsstress:
fsstress -d /mnt/f2fs -l 5 -n 100 -p 20
Test times: 5 times.
Before:
total count average
truncation: 5812480.6 20911.6 277.95
non-truncation: 7783845.6 13440.8 579.12
total: 13596326.2 34352.4 395.79
Patched:
total count average
truncation: 1281283.0 3041.6 421.25
non-truncation: 7355844.4 13662.8 538.38
total: 8637127.4 16704.4 517.06
1) For the updates in truncation path:
- we can see updating in batches leads total tsc and update count reducing
explicitly;
- besides, for a single batched updating, punching multiple extent nodes
in a loop, result in executing more operations, so our average tsc
increase intensively.
2) For the updates in non-truncation path:
- there is a little improvement, that is because for the scenario that we
just need to update in the head or tail of extent node, new interface
optimize to update info in extent node directly, rather than removing
original extent node for updating and then inserting that updated one
into cache as new node.
Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2015-08-26 12:34:48 +00:00
|
|
|
|
|
|
|
/* 2. invlidate all extent nodes in range [fofs, fofs + len - 1] */
|
2015-09-17 10:42:06 +00:00
|
|
|
while (en && en->ei.fofs < end) {
|
|
|
|
unsigned int org_end;
|
|
|
|
int parts = 0; /* # of parts current extent split into */
|
f2fs: update extent tree in batches
This patch introduce a new helper f2fs_update_extent_tree_range which can
do extent mapping update at a specified range.
The main idea is:
1) punch all mapping info in extent node(s) which are at a specified range;
2) try to merge new extent mapping with adjacent node, or failing that,
insert the mapping into extent tree as a new node.
In order to see the benefit, I add a function for stating time stamping
count as below:
uint64_t rdtsc(void)
{
uint32_t lo, hi;
__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
return (uint64_t)hi << 32 | lo;
}
My test environment is: ubuntu, intel i7-3770, 16G memory, 256g micron ssd.
truncation path: update extent cache from truncate_data_blocks_range
non-truncataion path: update extent cache from other paths
total: all update paths
a) Removing 128MB file which has one extent node mapping whole range of
file:
1. dd if=/dev/zero of=/mnt/f2fs/128M bs=1M count=128
2. sync
3. rm /mnt/f2fs/128M
Before:
total count average
truncation: 7651022 32768 233.49
Patched:
total count average
truncation: 3321 33 100.64
b) fsstress:
fsstress -d /mnt/f2fs -l 5 -n 100 -p 20
Test times: 5 times.
Before:
total count average
truncation: 5812480.6 20911.6 277.95
non-truncation: 7783845.6 13440.8 579.12
total: 13596326.2 34352.4 395.79
Patched:
total count average
truncation: 1281283.0 3041.6 421.25
non-truncation: 7355844.4 13662.8 538.38
total: 8637127.4 16704.4 517.06
1) For the updates in truncation path:
- we can see updating in batches leads total tsc and update count reducing
explicitly;
- besides, for a single batched updating, punching multiple extent nodes
in a loop, result in executing more operations, so our average tsc
increase intensively.
2) For the updates in non-truncation path:
- there is a little improvement, that is because for the scenario that we
just need to update in the head or tail of extent node, new interface
optimize to update info in extent node directly, rather than removing
original extent node for updating and then inserting that updated one
into cache as new node.
Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2015-08-26 12:34:48 +00:00
|
|
|
|
2015-09-17 10:42:06 +00:00
|
|
|
next_en = en1 = NULL;
|
f2fs: update extent tree in batches
This patch introduce a new helper f2fs_update_extent_tree_range which can
do extent mapping update at a specified range.
The main idea is:
1) punch all mapping info in extent node(s) which are at a specified range;
2) try to merge new extent mapping with adjacent node, or failing that,
insert the mapping into extent tree as a new node.
In order to see the benefit, I add a function for stating time stamping
count as below:
uint64_t rdtsc(void)
{
uint32_t lo, hi;
__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
return (uint64_t)hi << 32 | lo;
}
My test environment is: ubuntu, intel i7-3770, 16G memory, 256g micron ssd.
truncation path: update extent cache from truncate_data_blocks_range
non-truncataion path: update extent cache from other paths
total: all update paths
a) Removing 128MB file which has one extent node mapping whole range of
file:
1. dd if=/dev/zero of=/mnt/f2fs/128M bs=1M count=128
2. sync
3. rm /mnt/f2fs/128M
Before:
total count average
truncation: 7651022 32768 233.49
Patched:
total count average
truncation: 3321 33 100.64
b) fsstress:
fsstress -d /mnt/f2fs -l 5 -n 100 -p 20
Test times: 5 times.
Before:
total count average
truncation: 5812480.6 20911.6 277.95
non-truncation: 7783845.6 13440.8 579.12
total: 13596326.2 34352.4 395.79
Patched:
total count average
truncation: 1281283.0 3041.6 421.25
non-truncation: 7355844.4 13662.8 538.38
total: 8637127.4 16704.4 517.06
1) For the updates in truncation path:
- we can see updating in batches leads total tsc and update count reducing
explicitly;
- besides, for a single batched updating, punching multiple extent nodes
in a loop, result in executing more operations, so our average tsc
increase intensively.
2) For the updates in non-truncation path:
- there is a little improvement, that is because for the scenario that we
just need to update in the head or tail of extent node, new interface
optimize to update info in extent node directly, rather than removing
original extent node for updating and then inserting that updated one
into cache as new node.
Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2015-08-26 12:34:48 +00:00
|
|
|
|
|
|
|
dei = en->ei;
|
2015-09-17 10:42:06 +00:00
|
|
|
org_end = dei.fofs + dei.len;
|
|
|
|
f2fs_bug_on(sbi, pos >= org_end);
|
f2fs: update extent tree in batches
This patch introduce a new helper f2fs_update_extent_tree_range which can
do extent mapping update at a specified range.
The main idea is:
1) punch all mapping info in extent node(s) which are at a specified range;
2) try to merge new extent mapping with adjacent node, or failing that,
insert the mapping into extent tree as a new node.
In order to see the benefit, I add a function for stating time stamping
count as below:
uint64_t rdtsc(void)
{
uint32_t lo, hi;
__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
return (uint64_t)hi << 32 | lo;
}
My test environment is: ubuntu, intel i7-3770, 16G memory, 256g micron ssd.
truncation path: update extent cache from truncate_data_blocks_range
non-truncataion path: update extent cache from other paths
total: all update paths
a) Removing 128MB file which has one extent node mapping whole range of
file:
1. dd if=/dev/zero of=/mnt/f2fs/128M bs=1M count=128
2. sync
3. rm /mnt/f2fs/128M
Before:
total count average
truncation: 7651022 32768 233.49
Patched:
total count average
truncation: 3321 33 100.64
b) fsstress:
fsstress -d /mnt/f2fs -l 5 -n 100 -p 20
Test times: 5 times.
Before:
total count average
truncation: 5812480.6 20911.6 277.95
non-truncation: 7783845.6 13440.8 579.12
total: 13596326.2 34352.4 395.79
Patched:
total count average
truncation: 1281283.0 3041.6 421.25
non-truncation: 7355844.4 13662.8 538.38
total: 8637127.4 16704.4 517.06
1) For the updates in truncation path:
- we can see updating in batches leads total tsc and update count reducing
explicitly;
- besides, for a single batched updating, punching multiple extent nodes
in a loop, result in executing more operations, so our average tsc
increase intensively.
2) For the updates in non-truncation path:
- there is a little improvement, that is because for the scenario that we
just need to update in the head or tail of extent node, new interface
optimize to update info in extent node directly, rather than removing
original extent node for updating and then inserting that updated one
into cache as new node.
Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2015-08-26 12:34:48 +00:00
|
|
|
|
2015-09-17 10:42:06 +00:00
|
|
|
if (pos > dei.fofs && pos - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
|
|
|
|
en->ei.len = pos - en->ei.fofs;
|
|
|
|
prev_en = en;
|
|
|
|
parts = 1;
|
|
|
|
}
|
f2fs: update extent tree in batches
This patch introduce a new helper f2fs_update_extent_tree_range which can
do extent mapping update at a specified range.
The main idea is:
1) punch all mapping info in extent node(s) which are at a specified range;
2) try to merge new extent mapping with adjacent node, or failing that,
insert the mapping into extent tree as a new node.
In order to see the benefit, I add a function for stating time stamping
count as below:
uint64_t rdtsc(void)
{
uint32_t lo, hi;
__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
return (uint64_t)hi << 32 | lo;
}
My test environment is: ubuntu, intel i7-3770, 16G memory, 256g micron ssd.
truncation path: update extent cache from truncate_data_blocks_range
non-truncataion path: update extent cache from other paths
total: all update paths
a) Removing 128MB file which has one extent node mapping whole range of
file:
1. dd if=/dev/zero of=/mnt/f2fs/128M bs=1M count=128
2. sync
3. rm /mnt/f2fs/128M
Before:
total count average
truncation: 7651022 32768 233.49
Patched:
total count average
truncation: 3321 33 100.64
b) fsstress:
fsstress -d /mnt/f2fs -l 5 -n 100 -p 20
Test times: 5 times.
Before:
total count average
truncation: 5812480.6 20911.6 277.95
non-truncation: 7783845.6 13440.8 579.12
total: 13596326.2 34352.4 395.79
Patched:
total count average
truncation: 1281283.0 3041.6 421.25
non-truncation: 7355844.4 13662.8 538.38
total: 8637127.4 16704.4 517.06
1) For the updates in truncation path:
- we can see updating in batches leads total tsc and update count reducing
explicitly;
- besides, for a single batched updating, punching multiple extent nodes
in a loop, result in executing more operations, so our average tsc
increase intensively.
2) For the updates in non-truncation path:
- there is a little improvement, that is because for the scenario that we
just need to update in the head or tail of extent node, new interface
optimize to update info in extent node directly, rather than removing
original extent node for updating and then inserting that updated one
into cache as new node.
Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2015-08-26 12:34:48 +00:00
|
|
|
|
2015-09-17 10:42:06 +00:00
|
|
|
if (end < org_end && org_end - end >= F2FS_MIN_EXTENT_LEN) {
|
|
|
|
if (parts) {
|
|
|
|
set_extent_info(&ei, end,
|
|
|
|
end - dei.fofs + dei.blk,
|
|
|
|
org_end - end);
|
2016-05-20 16:52:20 +00:00
|
|
|
en1 = __insert_extent_tree(inode, et, &ei,
|
2015-09-17 10:42:06 +00:00
|
|
|
NULL, NULL);
|
|
|
|
next_en = en1;
|
|
|
|
} else {
|
|
|
|
en->ei.fofs = end;
|
|
|
|
en->ei.blk += end - dei.fofs;
|
|
|
|
en->ei.len -= end - dei.fofs;
|
|
|
|
next_en = en;
|
f2fs: update extent tree in batches
This patch introduce a new helper f2fs_update_extent_tree_range which can
do extent mapping update at a specified range.
The main idea is:
1) punch all mapping info in extent node(s) which are at a specified range;
2) try to merge new extent mapping with adjacent node, or failing that,
insert the mapping into extent tree as a new node.
In order to see the benefit, I add a function for stating time stamping
count as below:
uint64_t rdtsc(void)
{
uint32_t lo, hi;
__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
return (uint64_t)hi << 32 | lo;
}
My test environment is: ubuntu, intel i7-3770, 16G memory, 256g micron ssd.
truncation path: update extent cache from truncate_data_blocks_range
non-truncataion path: update extent cache from other paths
total: all update paths
a) Removing 128MB file which has one extent node mapping whole range of
file:
1. dd if=/dev/zero of=/mnt/f2fs/128M bs=1M count=128
2. sync
3. rm /mnt/f2fs/128M
Before:
total count average
truncation: 7651022 32768 233.49
Patched:
total count average
truncation: 3321 33 100.64
b) fsstress:
fsstress -d /mnt/f2fs -l 5 -n 100 -p 20
Test times: 5 times.
Before:
total count average
truncation: 5812480.6 20911.6 277.95
non-truncation: 7783845.6 13440.8 579.12
total: 13596326.2 34352.4 395.79
Patched:
total count average
truncation: 1281283.0 3041.6 421.25
non-truncation: 7355844.4 13662.8 538.38
total: 8637127.4 16704.4 517.06
1) For the updates in truncation path:
- we can see updating in batches leads total tsc and update count reducing
explicitly;
- besides, for a single batched updating, punching multiple extent nodes
in a loop, result in executing more operations, so our average tsc
increase intensively.
2) For the updates in non-truncation path:
- there is a little improvement, that is because for the scenario that we
just need to update in the head or tail of extent node, new interface
optimize to update info in extent node directly, rather than removing
original extent node for updating and then inserting that updated one
into cache as new node.
Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2015-08-26 12:34:48 +00:00
|
|
|
}
|
2015-09-17 10:42:06 +00:00
|
|
|
parts++;
|
f2fs: update extent tree in batches
This patch introduce a new helper f2fs_update_extent_tree_range which can
do extent mapping update at a specified range.
The main idea is:
1) punch all mapping info in extent node(s) which are at a specified range;
2) try to merge new extent mapping with adjacent node, or failing that,
insert the mapping into extent tree as a new node.
In order to see the benefit, I add a function for stating time stamping
count as below:
uint64_t rdtsc(void)
{
uint32_t lo, hi;
__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
return (uint64_t)hi << 32 | lo;
}
My test environment is: ubuntu, intel i7-3770, 16G memory, 256g micron ssd.
truncation path: update extent cache from truncate_data_blocks_range
non-truncataion path: update extent cache from other paths
total: all update paths
a) Removing 128MB file which has one extent node mapping whole range of
file:
1. dd if=/dev/zero of=/mnt/f2fs/128M bs=1M count=128
2. sync
3. rm /mnt/f2fs/128M
Before:
total count average
truncation: 7651022 32768 233.49
Patched:
total count average
truncation: 3321 33 100.64
b) fsstress:
fsstress -d /mnt/f2fs -l 5 -n 100 -p 20
Test times: 5 times.
Before:
total count average
truncation: 5812480.6 20911.6 277.95
non-truncation: 7783845.6 13440.8 579.12
total: 13596326.2 34352.4 395.79
Patched:
total count average
truncation: 1281283.0 3041.6 421.25
non-truncation: 7355844.4 13662.8 538.38
total: 8637127.4 16704.4 517.06
1) For the updates in truncation path:
- we can see updating in batches leads total tsc and update count reducing
explicitly;
- besides, for a single batched updating, punching multiple extent nodes
in a loop, result in executing more operations, so our average tsc
increase intensively.
2) For the updates in non-truncation path:
- there is a little improvement, that is because for the scenario that we
just need to update in the head or tail of extent node, new interface
optimize to update info in extent node directly, rather than removing
original extent node for updating and then inserting that updated one
into cache as new node.
Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2015-08-26 12:34:48 +00:00
|
|
|
}
|
|
|
|
|
2015-09-17 10:42:06 +00:00
|
|
|
if (!next_en) {
|
|
|
|
struct rb_node *node = rb_next(&en->rb_node);
|
f2fs: update extent tree in batches
This patch introduce a new helper f2fs_update_extent_tree_range which can
do extent mapping update at a specified range.
The main idea is:
1) punch all mapping info in extent node(s) which are at a specified range;
2) try to merge new extent mapping with adjacent node, or failing that,
insert the mapping into extent tree as a new node.
In order to see the benefit, I add a function for stating time stamping
count as below:
uint64_t rdtsc(void)
{
uint32_t lo, hi;
__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
return (uint64_t)hi << 32 | lo;
}
My test environment is: ubuntu, intel i7-3770, 16G memory, 256g micron ssd.
truncation path: update extent cache from truncate_data_blocks_range
non-truncataion path: update extent cache from other paths
total: all update paths
a) Removing 128MB file which has one extent node mapping whole range of
file:
1. dd if=/dev/zero of=/mnt/f2fs/128M bs=1M count=128
2. sync
3. rm /mnt/f2fs/128M
Before:
total count average
truncation: 7651022 32768 233.49
Patched:
total count average
truncation: 3321 33 100.64
b) fsstress:
fsstress -d /mnt/f2fs -l 5 -n 100 -p 20
Test times: 5 times.
Before:
total count average
truncation: 5812480.6 20911.6 277.95
non-truncation: 7783845.6 13440.8 579.12
total: 13596326.2 34352.4 395.79
Patched:
total count average
truncation: 1281283.0 3041.6 421.25
non-truncation: 7355844.4 13662.8 538.38
total: 8637127.4 16704.4 517.06
1) For the updates in truncation path:
- we can see updating in batches leads total tsc and update count reducing
explicitly;
- besides, for a single batched updating, punching multiple extent nodes
in a loop, result in executing more operations, so our average tsc
increase intensively.
2) For the updates in non-truncation path:
- there is a little improvement, that is because for the scenario that we
just need to update in the head or tail of extent node, new interface
optimize to update info in extent node directly, rather than removing
original extent node for updating and then inserting that updated one
into cache as new node.
Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2015-08-26 12:34:48 +00:00
|
|
|
|
2016-12-20 13:57:42 +00:00
|
|
|
next_en = rb_entry_safe(node, struct extent_node,
|
|
|
|
rb_node);
|
2015-07-08 09:59:36 +00:00
|
|
|
}
|
|
|
|
|
2015-09-22 13:07:47 +00:00
|
|
|
if (parts)
|
2016-05-20 16:52:20 +00:00
|
|
|
__try_update_largest_extent(inode, et, en);
|
2015-09-22 13:07:47 +00:00
|
|
|
else
|
2016-01-26 12:56:25 +00:00
|
|
|
__release_extent_node(sbi, et, en);
|
f2fs: update extent tree in batches
This patch introduce a new helper f2fs_update_extent_tree_range which can
do extent mapping update at a specified range.
The main idea is:
1) punch all mapping info in extent node(s) which are at a specified range;
2) try to merge new extent mapping with adjacent node, or failing that,
insert the mapping into extent tree as a new node.
In order to see the benefit, I add a function for stating time stamping
count as below:
uint64_t rdtsc(void)
{
uint32_t lo, hi;
__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
return (uint64_t)hi << 32 | lo;
}
My test environment is: ubuntu, intel i7-3770, 16G memory, 256g micron ssd.
truncation path: update extent cache from truncate_data_blocks_range
non-truncataion path: update extent cache from other paths
total: all update paths
a) Removing 128MB file which has one extent node mapping whole range of
file:
1. dd if=/dev/zero of=/mnt/f2fs/128M bs=1M count=128
2. sync
3. rm /mnt/f2fs/128M
Before:
total count average
truncation: 7651022 32768 233.49
Patched:
total count average
truncation: 3321 33 100.64
b) fsstress:
fsstress -d /mnt/f2fs -l 5 -n 100 -p 20
Test times: 5 times.
Before:
total count average
truncation: 5812480.6 20911.6 277.95
non-truncation: 7783845.6 13440.8 579.12
total: 13596326.2 34352.4 395.79
Patched:
total count average
truncation: 1281283.0 3041.6 421.25
non-truncation: 7355844.4 13662.8 538.38
total: 8637127.4 16704.4 517.06
1) For the updates in truncation path:
- we can see updating in batches leads total tsc and update count reducing
explicitly;
- besides, for a single batched updating, punching multiple extent nodes
in a loop, result in executing more operations, so our average tsc
increase intensively.
2) For the updates in non-truncation path:
- there is a little improvement, that is because for the scenario that we
just need to update in the head or tail of extent node, new interface
optimize to update info in extent node directly, rather than removing
original extent node for updating and then inserting that updated one
into cache as new node.
Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2015-08-26 12:34:48 +00:00
|
|
|
|
|
|
|
/*
|
2015-09-17 10:42:06 +00:00
|
|
|
* if original extent is split into zero or two parts, extent
|
|
|
|
* tree has been altered by deletion or insertion, therefore
|
|
|
|
* invalidate pointers regard to tree.
|
f2fs: update extent tree in batches
This patch introduce a new helper f2fs_update_extent_tree_range which can
do extent mapping update at a specified range.
The main idea is:
1) punch all mapping info in extent node(s) which are at a specified range;
2) try to merge new extent mapping with adjacent node, or failing that,
insert the mapping into extent tree as a new node.
In order to see the benefit, I add a function for stating time stamping
count as below:
uint64_t rdtsc(void)
{
uint32_t lo, hi;
__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
return (uint64_t)hi << 32 | lo;
}
My test environment is: ubuntu, intel i7-3770, 16G memory, 256g micron ssd.
truncation path: update extent cache from truncate_data_blocks_range
non-truncataion path: update extent cache from other paths
total: all update paths
a) Removing 128MB file which has one extent node mapping whole range of
file:
1. dd if=/dev/zero of=/mnt/f2fs/128M bs=1M count=128
2. sync
3. rm /mnt/f2fs/128M
Before:
total count average
truncation: 7651022 32768 233.49
Patched:
total count average
truncation: 3321 33 100.64
b) fsstress:
fsstress -d /mnt/f2fs -l 5 -n 100 -p 20
Test times: 5 times.
Before:
total count average
truncation: 5812480.6 20911.6 277.95
non-truncation: 7783845.6 13440.8 579.12
total: 13596326.2 34352.4 395.79
Patched:
total count average
truncation: 1281283.0 3041.6 421.25
non-truncation: 7355844.4 13662.8 538.38
total: 8637127.4 16704.4 517.06
1) For the updates in truncation path:
- we can see updating in batches leads total tsc and update count reducing
explicitly;
- besides, for a single batched updating, punching multiple extent nodes
in a loop, result in executing more operations, so our average tsc
increase intensively.
2) For the updates in non-truncation path:
- there is a little improvement, that is because for the scenario that we
just need to update in the head or tail of extent node, new interface
optimize to update info in extent node directly, rather than removing
original extent node for updating and then inserting that updated one
into cache as new node.
Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2015-08-26 12:34:48 +00:00
|
|
|
*/
|
2015-09-17 10:42:06 +00:00
|
|
|
if (parts != 1) {
|
|
|
|
insert_p = NULL;
|
|
|
|
insert_parent = NULL;
|
2015-07-08 09:59:36 +00:00
|
|
|
}
|
2015-09-17 10:42:06 +00:00
|
|
|
en = next_en;
|
2015-07-08 09:59:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* 3. update extent in extent cache */
|
|
|
|
if (blkaddr) {
|
f2fs: update extent tree in batches
This patch introduce a new helper f2fs_update_extent_tree_range which can
do extent mapping update at a specified range.
The main idea is:
1) punch all mapping info in extent node(s) which are at a specified range;
2) try to merge new extent mapping with adjacent node, or failing that,
insert the mapping into extent tree as a new node.
In order to see the benefit, I add a function for stating time stamping
count as below:
uint64_t rdtsc(void)
{
uint32_t lo, hi;
__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
return (uint64_t)hi << 32 | lo;
}
My test environment is: ubuntu, intel i7-3770, 16G memory, 256g micron ssd.
truncation path: update extent cache from truncate_data_blocks_range
non-truncataion path: update extent cache from other paths
total: all update paths
a) Removing 128MB file which has one extent node mapping whole range of
file:
1. dd if=/dev/zero of=/mnt/f2fs/128M bs=1M count=128
2. sync
3. rm /mnt/f2fs/128M
Before:
total count average
truncation: 7651022 32768 233.49
Patched:
total count average
truncation: 3321 33 100.64
b) fsstress:
fsstress -d /mnt/f2fs -l 5 -n 100 -p 20
Test times: 5 times.
Before:
total count average
truncation: 5812480.6 20911.6 277.95
non-truncation: 7783845.6 13440.8 579.12
total: 13596326.2 34352.4 395.79
Patched:
total count average
truncation: 1281283.0 3041.6 421.25
non-truncation: 7355844.4 13662.8 538.38
total: 8637127.4 16704.4 517.06
1) For the updates in truncation path:
- we can see updating in batches leads total tsc and update count reducing
explicitly;
- besides, for a single batched updating, punching multiple extent nodes
in a loop, result in executing more operations, so our average tsc
increase intensively.
2) For the updates in non-truncation path:
- there is a little improvement, that is because for the scenario that we
just need to update in the head or tail of extent node, new interface
optimize to update info in extent node directly, rather than removing
original extent node for updating and then inserting that updated one
into cache as new node.
Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2015-08-26 12:34:48 +00:00
|
|
|
|
|
|
|
set_extent_info(&ei, fofs, blkaddr, len);
|
2016-05-20 16:52:20 +00:00
|
|
|
if (!__try_merge_extent_node(inode, et, &ei, prev_en, next_en))
|
|
|
|
__insert_extent_tree(inode, et, &ei,
|
2015-08-19 11:15:09 +00:00
|
|
|
insert_p, insert_parent);
|
2015-07-08 09:59:36 +00:00
|
|
|
|
|
|
|
/* give up extent_cache, if split and small updates happen */
|
|
|
|
if (dei.len >= 1 &&
|
|
|
|
prev.len < F2FS_MIN_EXTENT_LEN &&
|
|
|
|
et->largest.len < F2FS_MIN_EXTENT_LEN) {
|
2016-05-20 16:52:20 +00:00
|
|
|
__drop_largest_extent(inode, 0, UINT_MAX);
|
2016-05-20 17:13:22 +00:00
|
|
|
set_inode_flag(inode, FI_NO_EXTENT);
|
2015-07-08 09:59:36 +00:00
|
|
|
}
|
f2fs: update extent tree in batches
This patch introduce a new helper f2fs_update_extent_tree_range which can
do extent mapping update at a specified range.
The main idea is:
1) punch all mapping info in extent node(s) which are at a specified range;
2) try to merge new extent mapping with adjacent node, or failing that,
insert the mapping into extent tree as a new node.
In order to see the benefit, I add a function for stating time stamping
count as below:
uint64_t rdtsc(void)
{
uint32_t lo, hi;
__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
return (uint64_t)hi << 32 | lo;
}
My test environment is: ubuntu, intel i7-3770, 16G memory, 256g micron ssd.
truncation path: update extent cache from truncate_data_blocks_range
non-truncataion path: update extent cache from other paths
total: all update paths
a) Removing 128MB file which has one extent node mapping whole range of
file:
1. dd if=/dev/zero of=/mnt/f2fs/128M bs=1M count=128
2. sync
3. rm /mnt/f2fs/128M
Before:
total count average
truncation: 7651022 32768 233.49
Patched:
total count average
truncation: 3321 33 100.64
b) fsstress:
fsstress -d /mnt/f2fs -l 5 -n 100 -p 20
Test times: 5 times.
Before:
total count average
truncation: 5812480.6 20911.6 277.95
non-truncation: 7783845.6 13440.8 579.12
total: 13596326.2 34352.4 395.79
Patched:
total count average
truncation: 1281283.0 3041.6 421.25
non-truncation: 7355844.4 13662.8 538.38
total: 8637127.4 16704.4 517.06
1) For the updates in truncation path:
- we can see updating in batches leads total tsc and update count reducing
explicitly;
- besides, for a single batched updating, punching multiple extent nodes
in a loop, result in executing more operations, so our average tsc
increase intensively.
2) For the updates in non-truncation path:
- there is a little improvement, that is because for the scenario that we
just need to update in the head or tail of extent node, new interface
optimize to update info in extent node directly, rather than removing
original extent node for updating and then inserting that updated one
into cache as new node.
Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2015-08-26 12:34:48 +00:00
|
|
|
}
|
2015-07-08 09:59:36 +00:00
|
|
|
|
2016-05-20 17:13:22 +00:00
|
|
|
if (is_inode_flag_set(inode, FI_NO_EXTENT))
|
2016-01-26 12:56:26 +00:00
|
|
|
__free_extent_tree(sbi, et);
|
2015-07-08 09:59:36 +00:00
|
|
|
|
|
|
|
write_unlock(&et->lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
|
|
|
|
{
|
2015-12-31 23:02:16 +00:00
|
|
|
struct extent_tree *et, *next;
|
2016-01-26 12:56:26 +00:00
|
|
|
struct extent_node *en;
|
2015-07-08 09:59:36 +00:00
|
|
|
unsigned int node_cnt = 0, tree_cnt = 0;
|
|
|
|
int remained;
|
|
|
|
|
|
|
|
if (!test_opt(sbi, EXTENT_CACHE))
|
|
|
|
return 0;
|
|
|
|
|
2015-12-22 03:25:50 +00:00
|
|
|
if (!atomic_read(&sbi->total_zombie_tree))
|
|
|
|
goto free_node;
|
|
|
|
|
2017-02-23 11:39:59 +00:00
|
|
|
if (!mutex_trylock(&sbi->extent_tree_lock))
|
2015-07-08 09:59:36 +00:00
|
|
|
goto out;
|
|
|
|
|
|
|
|
/* 1. remove unreferenced extent tree */
|
2015-12-31 23:02:16 +00:00
|
|
|
list_for_each_entry_safe(et, next, &sbi->zombie_list, list) {
|
2016-01-08 12:24:00 +00:00
|
|
|
if (atomic_read(&et->node_cnt)) {
|
|
|
|
write_lock(&et->lock);
|
2016-01-26 12:56:26 +00:00
|
|
|
node_cnt += __free_extent_tree(sbi, et);
|
2016-01-08 12:24:00 +00:00
|
|
|
write_unlock(&et->lock);
|
|
|
|
}
|
2016-01-26 12:56:26 +00:00
|
|
|
f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
|
2015-12-31 23:02:16 +00:00
|
|
|
list_del_init(&et->list);
|
|
|
|
radix_tree_delete(&sbi->extent_tree_root, et->ino);
|
|
|
|
kmem_cache_free(extent_tree_slab, et);
|
|
|
|
atomic_dec(&sbi->total_ext_tree);
|
|
|
|
atomic_dec(&sbi->total_zombie_tree);
|
|
|
|
tree_cnt++;
|
2015-07-08 09:59:36 +00:00
|
|
|
|
2015-12-31 23:02:16 +00:00
|
|
|
if (node_cnt + tree_cnt >= nr_shrink)
|
|
|
|
goto unlock_out;
|
2016-01-19 23:31:48 +00:00
|
|
|
cond_resched();
|
2015-07-08 09:59:36 +00:00
|
|
|
}
|
2017-02-23 11:39:59 +00:00
|
|
|
mutex_unlock(&sbi->extent_tree_lock);
|
2015-07-08 09:59:36 +00:00
|
|
|
|
2015-12-22 03:25:50 +00:00
|
|
|
free_node:
|
2015-07-08 09:59:36 +00:00
|
|
|
/* 2. remove LRU extent entries */
|
2017-02-23 11:39:59 +00:00
|
|
|
if (!mutex_trylock(&sbi->extent_tree_lock))
|
2015-07-08 09:59:36 +00:00
|
|
|
goto out;
|
|
|
|
|
|
|
|
remained = nr_shrink - (node_cnt + tree_cnt);
|
|
|
|
|
|
|
|
spin_lock(&sbi->extent_lock);
|
2016-01-26 12:56:26 +00:00
|
|
|
for (; remained > 0; remained--) {
|
|
|
|
if (list_empty(&sbi->extent_list))
|
2015-07-08 09:59:36 +00:00
|
|
|
break;
|
2016-01-26 12:56:26 +00:00
|
|
|
en = list_first_entry(&sbi->extent_list,
|
|
|
|
struct extent_node, list);
|
|
|
|
et = en->et;
|
|
|
|
if (!write_trylock(&et->lock)) {
|
|
|
|
/* refresh this extent node's position in extent list */
|
|
|
|
list_move_tail(&en->list, &sbi->extent_list);
|
|
|
|
continue;
|
|
|
|
}
|
2015-07-08 09:59:36 +00:00
|
|
|
|
2016-01-26 12:56:26 +00:00
|
|
|
list_del_init(&en->list);
|
|
|
|
spin_unlock(&sbi->extent_lock);
|
2016-01-08 12:24:00 +00:00
|
|
|
|
2016-01-26 12:56:26 +00:00
|
|
|
__detach_extent_node(sbi, et, en);
|
2015-07-08 09:59:36 +00:00
|
|
|
|
2016-01-26 12:56:26 +00:00
|
|
|
write_unlock(&et->lock);
|
|
|
|
node_cnt++;
|
|
|
|
spin_lock(&sbi->extent_lock);
|
2015-07-08 09:59:36 +00:00
|
|
|
}
|
2016-01-26 12:56:26 +00:00
|
|
|
spin_unlock(&sbi->extent_lock);
|
|
|
|
|
2015-07-08 09:59:36 +00:00
|
|
|
unlock_out:
|
2017-02-23 11:39:59 +00:00
|
|
|
mutex_unlock(&sbi->extent_tree_lock);
|
2015-07-08 09:59:36 +00:00
|
|
|
out:
|
|
|
|
trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);
|
|
|
|
|
|
|
|
return node_cnt + tree_cnt;
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned int f2fs_destroy_extent_node(struct inode *inode)
|
|
|
|
{
|
|
|
|
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
|
|
|
|
struct extent_tree *et = F2FS_I(inode)->extent_tree;
|
|
|
|
unsigned int node_cnt = 0;
|
|
|
|
|
2016-01-08 12:24:00 +00:00
|
|
|
if (!et || !atomic_read(&et->node_cnt))
|
2015-07-08 09:59:36 +00:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
write_lock(&et->lock);
|
2016-01-26 12:56:26 +00:00
|
|
|
node_cnt = __free_extent_tree(sbi, et);
|
2015-07-08 09:59:36 +00:00
|
|
|
write_unlock(&et->lock);
|
|
|
|
|
|
|
|
return node_cnt;
|
|
|
|
}
|
|
|
|
|
2016-07-12 18:07:52 +00:00
|
|
|
void f2fs_drop_extent_tree(struct inode *inode)
|
|
|
|
{
|
|
|
|
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
|
|
|
|
struct extent_tree *et = F2FS_I(inode)->extent_tree;
|
|
|
|
|
2018-01-27 09:29:49 +00:00
|
|
|
if (!f2fs_may_extent_tree(inode))
|
|
|
|
return;
|
|
|
|
|
2016-07-12 18:07:52 +00:00
|
|
|
set_inode_flag(inode, FI_NO_EXTENT);
|
|
|
|
|
|
|
|
write_lock(&et->lock);
|
|
|
|
__free_extent_tree(sbi, et);
|
|
|
|
__drop_largest_extent(inode, 0, UINT_MAX);
|
|
|
|
write_unlock(&et->lock);
|
|
|
|
}
|
|
|
|
|
2015-07-08 09:59:36 +00:00
|
|
|
/*
 * Tear down @inode's extent tree on inode eviction.
 *
 * A still-linked, healthy inode with cached nodes gets its tree parked on
 * the per-sb zombie list instead of being freed immediately, so the
 * shrinker can reclaim it lazily; otherwise the tree is destroyed here:
 * all nodes are freed, the tree is removed from the radix tree, and its
 * memory is returned to the slab cache.
 */
void f2fs_destroy_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	unsigned int node_cnt = 0;

	if (!et)
		return;

	if (inode->i_nlink && !is_bad_inode(inode) &&
					atomic_read(&et->node_cnt)) {
		/* Defer reclaim: hand the tree to the shrinker's zombie list. */
		mutex_lock(&sbi->extent_tree_lock);
		list_add_tail(&et->list, &sbi->zombie_list);
		atomic_inc(&sbi->total_zombie_tree);
		mutex_unlock(&sbi->extent_tree_lock);
		return;
	}

	/* free all extent info belong to this extent tree */
	node_cnt = f2fs_destroy_extent_node(inode);

	/* delete extent tree entry in radix tree */
	mutex_lock(&sbi->extent_tree_lock);
	/* All nodes must be gone before the tree itself is freed. */
	f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
	kmem_cache_free(extent_tree_slab, et);
	atomic_dec(&sbi->total_ext_tree);
	mutex_unlock(&sbi->extent_tree_lock);

	F2FS_I(inode)->extent_tree = NULL;

	trace_f2fs_destroy_extent_tree(inode, node_cnt);
}
|
|
|
|
|
|
|
|
/*
 * Look up @pgofs in @inode's extent cache, filling @ei on a hit.
 *
 * Returns true on a cache hit, false on a miss or when extent caching
 * is disabled for this inode.
 */
bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct extent_info *ei)
{
	/* Inodes without extent caching enabled can never hit. */
	return f2fs_may_extent_tree(inode) &&
		f2fs_lookup_extent_tree(inode, pgofs, ei);
}
|
|
|
|
|
|
|
|
void f2fs_update_extent_cache(struct dnode_of_data *dn)
|
|
|
|
{
|
|
|
|
pgoff_t fofs;
|
2016-02-24 09:16:47 +00:00
|
|
|
block_t blkaddr;
|
2015-07-08 09:59:36 +00:00
|
|
|
|
|
|
|
if (!f2fs_may_extent_tree(dn->inode))
|
|
|
|
return;
|
|
|
|
|
2016-02-24 09:16:47 +00:00
|
|
|
if (dn->data_blkaddr == NEW_ADDR)
|
|
|
|
blkaddr = NULL_ADDR;
|
|
|
|
else
|
|
|
|
blkaddr = dn->data_blkaddr;
|
f2fs: update extent tree in batches
This patch introduce a new helper f2fs_update_extent_tree_range which can
do extent mapping update at a specified range.
The main idea is:
1) punch all mapping info in extent node(s) which are at a specified range;
2) try to merge new extent mapping with adjacent node, or failing that,
insert the mapping into extent tree as a new node.
In order to see the benefit, I add a function for stating time stamping
count as below:
uint64_t rdtsc(void)
{
uint32_t lo, hi;
__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
return (uint64_t)hi << 32 | lo;
}
My test environment is: ubuntu, intel i7-3770, 16G memory, 256g micron ssd.
truncation path: update extent cache from truncate_data_blocks_range
non-truncataion path: update extent cache from other paths
total: all update paths
a) Removing 128MB file which has one extent node mapping whole range of
file:
1. dd if=/dev/zero of=/mnt/f2fs/128M bs=1M count=128
2. sync
3. rm /mnt/f2fs/128M
Before:
total count average
truncation: 7651022 32768 233.49
Patched:
total count average
truncation: 3321 33 100.64
b) fsstress:
fsstress -d /mnt/f2fs -l 5 -n 100 -p 20
Test times: 5 times.
Before:
total count average
truncation: 5812480.6 20911.6 277.95
non-truncation: 7783845.6 13440.8 579.12
total: 13596326.2 34352.4 395.79
Patched:
total count average
truncation: 1281283.0 3041.6 421.25
non-truncation: 7355844.4 13662.8 538.38
total: 8637127.4 16704.4 517.06
1) For the updates in truncation path:
- we can see updating in batches leads total tsc and update count reducing
explicitly;
- besides, for a single batched updating, punching multiple extent nodes
in a loop, result in executing more operations, so our average tsc
increase intensively.
2) For the updates in non-truncation path:
- there is a little improvement, that is because for the scenario that we
just need to update in the head or tail of extent node, new interface
optimize to update info in extent node directly, rather than removing
original extent node for updating and then inserting that updated one
into cache as new node.
Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2015-08-26 12:34:48 +00:00
|
|
|
|
2016-01-26 07:39:35 +00:00
|
|
|
fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
|
|
|
|
dn->ofs_in_node;
|
2016-05-20 23:32:49 +00:00
|
|
|
f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, 1);
|
f2fs: update extent tree in batches
This patch introduce a new helper f2fs_update_extent_tree_range which can
do extent mapping update at a specified range.
The main idea is:
1) punch all mapping info in extent node(s) which are at a specified range;
2) try to merge new extent mapping with adjacent node, or failing that,
insert the mapping into extent tree as a new node.
In order to see the benefit, I add a function for stating time stamping
count as below:
uint64_t rdtsc(void)
{
uint32_t lo, hi;
__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
return (uint64_t)hi << 32 | lo;
}
My test environment is: ubuntu, intel i7-3770, 16G memory, 256g micron ssd.
truncation path: update extent cache from truncate_data_blocks_range
non-truncataion path: update extent cache from other paths
total: all update paths
a) Removing 128MB file which has one extent node mapping whole range of
file:
1. dd if=/dev/zero of=/mnt/f2fs/128M bs=1M count=128
2. sync
3. rm /mnt/f2fs/128M
Before:
total count average
truncation: 7651022 32768 233.49
Patched:
total count average
truncation: 3321 33 100.64
b) fsstress:
fsstress -d /mnt/f2fs -l 5 -n 100 -p 20
Test times: 5 times.
Before:
total count average
truncation: 5812480.6 20911.6 277.95
non-truncation: 7783845.6 13440.8 579.12
total: 13596326.2 34352.4 395.79
Patched:
total count average
truncation: 1281283.0 3041.6 421.25
non-truncation: 7355844.4 13662.8 538.38
total: 8637127.4 16704.4 517.06
1) For the updates in truncation path:
- we can see updating in batches leads total tsc and update count reducing
explicitly;
- besides, for a single batched updating, punching multiple extent nodes
in a loop, result in executing more operations, so our average tsc
increase intensively.
2) For the updates in non-truncation path:
- there is a little improvement, that is because for the scenario that we
just need to update in the head or tail of extent node, new interface
optimize to update info in extent node directly, rather than removing
original extent node for updating and then inserting that updated one
into cache as new node.
Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2015-08-26 12:34:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
|
|
|
|
pgoff_t fofs, block_t blkaddr, unsigned int len)
|
|
|
|
|
|
|
|
{
|
|
|
|
if (!f2fs_may_extent_tree(dn->inode))
|
|
|
|
return;
|
|
|
|
|
2016-05-20 23:32:49 +00:00
|
|
|
f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len);
|
2015-07-08 09:59:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void init_extent_cache_info(struct f2fs_sb_info *sbi)
|
|
|
|
{
|
|
|
|
INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
|
2017-02-23 11:39:59 +00:00
|
|
|
mutex_init(&sbi->extent_tree_lock);
|
2015-07-08 09:59:36 +00:00
|
|
|
INIT_LIST_HEAD(&sbi->extent_list);
|
|
|
|
spin_lock_init(&sbi->extent_lock);
|
2015-12-22 03:20:15 +00:00
|
|
|
atomic_set(&sbi->total_ext_tree, 0);
|
2015-12-31 23:02:16 +00:00
|
|
|
INIT_LIST_HEAD(&sbi->zombie_list);
|
2015-12-22 03:25:50 +00:00
|
|
|
atomic_set(&sbi->total_zombie_tree, 0);
|
2015-07-08 09:59:36 +00:00
|
|
|
atomic_set(&sbi->total_ext_node, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Create the slab caches backing the extent cache (one for trees, one
 * for nodes).
 *
 * Returns 0 on success or -ENOMEM if either cache cannot be created;
 * on failure no cache is left allocated.
 */
int __init create_extent_cache(void)
{
	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
			sizeof(struct extent_tree));
	if (!extent_tree_slab)
		goto fail;

	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
			sizeof(struct extent_node));
	if (!extent_node_slab)
		goto free_tree_slab;

	return 0;

free_tree_slab:
	kmem_cache_destroy(extent_tree_slab);
fail:
	return -ENOMEM;
}
|
|
|
|
|
|
|
|
/*
 * Release the slab caches created by create_extent_cache().
 */
void destroy_extent_cache(void)
{
	kmem_cache_destroy(extent_node_slab);
	kmem_cache_destroy(extent_tree_slab);
}
|