2012-11-29 04:28:09 +00:00
|
|
|
/*
|
2012-11-02 08:13:32 +00:00
|
|
|
* fs/f2fs/recovery.c
|
|
|
|
*
|
|
|
|
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
|
|
|
|
* http://www.samsung.com/
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*/
|
|
|
|
#include <linux/fs.h>
|
|
|
|
#include <linux/f2fs_fs.h>
|
|
|
|
#include "f2fs.h"
|
|
|
|
#include "node.h"
|
|
|
|
#include "segment.h"
|
|
|
|
|
2014-09-15 23:46:08 +00:00
|
|
|
/*
|
|
|
|
* Roll forward recovery scenarios.
|
|
|
|
*
|
|
|
|
* [Term] F: fsync_mark, D: dentry_mark
|
|
|
|
*
|
|
|
|
* 1. inode(x) | CP | inode(x) | dnode(F)
|
|
|
|
* -> Update the latest inode(x).
|
|
|
|
*
|
|
|
|
* 2. inode(x) | CP | inode(F) | dnode(F)
|
|
|
|
* -> No problem.
|
|
|
|
*
|
|
|
|
* 3. inode(x) | CP | dnode(F) | inode(x)
|
|
|
|
* -> Recover to the latest dnode(F), and drop the last inode(x)
|
|
|
|
*
|
|
|
|
* 4. inode(x) | CP | dnode(F) | inode(F)
|
|
|
|
* -> No problem.
|
|
|
|
*
|
|
|
|
* 5. CP | inode(x) | dnode(F)
|
|
|
|
* -> The inode(DF) was missing. Should drop this dnode(F).
|
|
|
|
*
|
|
|
|
* 6. CP | inode(DF) | dnode(F)
|
|
|
|
* -> No problem.
|
|
|
|
*
|
|
|
|
* 7. CP | dnode(F) | inode(DF)
|
|
|
|
* -> If f2fs_iget fails, then goto next to find inode(DF).
|
|
|
|
*
|
|
|
|
* 8. CP | dnode(F) | inode(x)
|
|
|
|
* -> If f2fs_iget fails, then goto next to find inode(DF).
|
|
|
|
* But it will fail due to no inode(DF).
|
|
|
|
*/
|
|
|
|
|
2012-11-02 08:13:32 +00:00
|
|
|
static struct kmem_cache *fsync_entry_slab;
|
|
|
|
|
|
|
|
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
|
|
|
|
{
|
|
|
|
if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
|
|
|
|
> sbi->user_block_count)
|
|
|
|
return false;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
|
|
|
|
nid_t ino)
|
|
|
|
{
|
|
|
|
struct fsync_inode_entry *entry;
|
|
|
|
|
2014-03-29 03:33:17 +00:00
|
|
|
list_for_each_entry(entry, head, list)
|
2012-11-02 08:13:32 +00:00
|
|
|
if (entry->inode->i_ino == ino)
|
|
|
|
return entry;
|
2014-03-29 03:33:17 +00:00
|
|
|
|
2012-11-02 08:13:32 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2014-09-11 21:29:06 +00:00
|
|
|
/*
 * Re-link @inode into its parent directory using the name stored in the
 * fsynced inode page @ipage.
 *
 * The on-disk inode carries i_pino/i_name/i_namelen, which is enough to
 * redo the directory entry that was lost with the last checkpoint.  If a
 * stale entry with the same name but a different ino exists, it is
 * removed (the stale inode is orphaned) and the lookup is retried.
 *
 * Returns 0 on success or a negative errno.
 */
static int recover_dentry(struct inode *inode, struct page *ipage)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct qstr name;
	struct page *page;
	struct inode *dir, *einode;
	int err = 0;

	dir = f2fs_iget(inode->i_sb, pino);
	if (IS_ERR(dir)) {
		err = PTR_ERR(dir);
		goto out;
	}

	/*
	 * Encrypted names cannot be matched here; skip dentry recovery
	 * for such files (presumably handled via the encrypted-name path
	 * elsewhere — NOTE(review): confirm against fscrypt recovery flow).
	 */
	if (file_enc_name(inode)) {
		iput(dir);
		return 0;
	}

	name.len = le32_to_cpu(raw_inode->i_namelen);
	name.name = raw_inode->i_name;

	/* an over-long name means the node page is corrupted */
	if (unlikely(name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out_err;
	}
retry:
	de = f2fs_find_entry(dir, &name, &page);
	/* the dentry already points at us: nothing to do */
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_unmap_put;

	if (de) {
		/*
		 * Same name, different inode: drop the stale entry and make
		 * its inode an orphan, then retry the lookup from scratch.
		 */
		einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			/* map "inode vanished" to "entry conflict" for callers */
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_unmap_put;
		}
		err = acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	}
	err = __f2fs_add_link(dir, &name, inode, inode->i_ino, inode->i_mode);
	if (err)
		goto out_err;

	/*
	 * Defer the iput of @dir once: keep it dirty-listed so the
	 * directory gets written back, and drop the extra reference on a
	 * later pass (FI_DELAY_IPUT marks that the deferral is pending).
	 */
	if (is_inode_flag_set(F2FS_I(dir), FI_DELAY_IPUT)) {
		iput(dir);
	} else {
		add_dirty_dir_inode(dir);
		set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT);
	}

	goto out;

out_unmap_put:
	f2fs_dentry_kunmap(dir, page);
	f2fs_put_page(page, 0);
out_err:
	iput(dir);
out:
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), raw_inode->i_name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}
|
|
|
|
|
2014-09-11 21:29:06 +00:00
|
|
|
static void recover_inode(struct inode *inode, struct page *page)
|
2012-11-02 08:13:32 +00:00
|
|
|
{
|
2014-09-15 23:46:08 +00:00
|
|
|
struct f2fs_inode *raw = F2FS_INODE(page);
|
2015-04-30 00:02:18 +00:00
|
|
|
char *name;
|
2014-09-15 23:46:08 +00:00
|
|
|
|
|
|
|
inode->i_mode = le16_to_cpu(raw->i_mode);
|
|
|
|
i_size_write(inode, le64_to_cpu(raw->i_size));
|
|
|
|
inode->i_atime.tv_sec = le64_to_cpu(raw->i_mtime);
|
|
|
|
inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
|
|
|
|
inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
|
|
|
|
inode->i_atime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
|
|
|
|
inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
|
|
|
|
inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
|
2013-05-16 06:04:49 +00:00
|
|
|
|
2015-04-30 00:02:18 +00:00
|
|
|
if (file_enc_name(inode))
|
|
|
|
name = "<encrypted>";
|
|
|
|
else
|
|
|
|
name = F2FS_INODE(page)->i_name;
|
|
|
|
|
2013-05-16 06:04:49 +00:00
|
|
|
f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
|
2015-04-30 00:02:18 +00:00
|
|
|
ino_of_node(page), name);
|
2012-11-02 08:13:32 +00:00
|
|
|
}
|
|
|
|
|
2015-12-03 22:14:40 +00:00
|
|
|
static bool is_same_inode(struct inode *inode, struct page *ipage)
|
|
|
|
{
|
|
|
|
struct f2fs_inode *ri = F2FS_INODE(ipage);
|
|
|
|
struct timespec disk;
|
|
|
|
|
|
|
|
if (!IS_INODE(ipage))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
disk.tv_sec = le64_to_cpu(ri->i_ctime);
|
|
|
|
disk.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
|
|
|
|
if (timespec_compare(&inode->i_ctime, &disk) > 0)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
disk.tv_sec = le64_to_cpu(ri->i_atime);
|
|
|
|
disk.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
|
|
|
|
if (timespec_compare(&inode->i_atime, &disk) > 0)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
disk.tv_sec = le64_to_cpu(ri->i_mtime);
|
|
|
|
disk.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
|
|
|
|
if (timespec_compare(&inode->i_mtime, &disk) > 0)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2012-11-02 08:13:32 +00:00
|
|
|
/*
 * Walk the warm-node log written after the last checkpoint and collect,
 * on @head, one fsync_inode_entry per inode that has fsynced dnodes to
 * replay.
 *
 * The walk starts at the next free block of CURSEG_WARM_NODE and follows
 * next_blkaddr_of_node() links, stopping when a block address becomes
 * invalid or a node page belongs to an older checkpoint version.
 *
 * Returns 0 on success (including a clean early stop) or a negative
 * errno.
 */
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/* prime the readahead for the first log block */
	ra_meta_pages(sbi, blkaddr, 1, META_POR, true);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = get_tmp_page(sbi, blkaddr);

		/* a stale checkpoint version marks the end of the log */
		if (cp_ver != cpver_of_node(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (entry) {
			/* drop dnodes whose inode image went stale */
			if (!is_same_inode(entry->inode, page))
				goto next;
		} else {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
			}

			/* add this fsync inode to the list */
			entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
			if (!entry) {
				err = -ENOMEM;
				break;
			}
			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(entry->inode)) {
				err = PTR_ERR(entry->inode);
				kmem_cache_free(fsync_entry_slab, entry);
				/* the inode may show up later in the log */
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				break;
			}
			list_add_tail(&entry->list, head);
		}
		entry->blkaddr = blkaddr;

		/* remember where the last inode/dentry images live */
		if (IS_INODE(page)) {
			entry->last_inode = blkaddr;
			if (is_dent_dnode(page))
				entry->last_dentry = blkaddr;
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		ra_meta_pages_cond(sbi, blkaddr);
	}
	f2fs_put_page(page, 1);
	return err;
}
|
|
|
|
|
2013-06-27 01:28:54 +00:00
|
|
|
static void destroy_fsync_dnodes(struct list_head *head)
|
2012-11-02 08:13:32 +00:00
|
|
|
{
|
2013-01-20 15:02:58 +00:00
|
|
|
struct fsync_inode_entry *entry, *tmp;
|
|
|
|
|
|
|
|
list_for_each_entry_safe(entry, tmp, head, list) {
|
2012-11-02 08:13:32 +00:00
|
|
|
iput(entry->inode);
|
|
|
|
list_del(&entry->list);
|
|
|
|
kmem_cache_free(fsync_entry_slab, entry);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-05-21 23:20:01 +00:00
|
|
|
/*
 * Before reusing @blkaddr for recovery, find the dnode that currently
 * owns it (via the segment summary) and truncate that stale index so the
 * block is not referenced twice.
 *
 * Care is taken not to deadlock on dn->inode_page: if the owning dnode
 * is the locked inode page itself we reuse it directly (truncate_out
 * path), and if we must call get_dnode_of_data() for the same inode we
 * temporarily unlock the inode page around the call.
 *
 * Returns 0 on success (including "block not valid, nothing to do") or a
 * negative errno.
 */
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	/* block is not currently allocated: no stale owner to fix up */
	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			/* segment is open: use the in-memory summary */
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = get_sum_page(sbi, segno);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		/* owner is our own inode page: truncate in place */
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		/* owner is the dnode page we already hold */
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		/* Deallocate previous index in the node page */
		inode = f2fs_iget(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);
	} else {
		inode = dn->inode;
	}

	bidx = start_bidx_of_node(offset, inode) + le16_to_cpu(sum.ofs_in_node);

	/*
	 * if inode page is locked, unlock temporarily, but its reference
	 * count keeps alive.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		/* restore the lock state the caller expects */
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (datablock_addr(tdn.node_page, tdn.ofs_in_node) == blkaddr)
		truncate_data_blocks_range(&tdn, 1);
	/* drop the lock we took above when the caller did not hold it */
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}
|
|
|
|
|
2013-03-20 10:01:06 +00:00
|
|
|
/*
 * Replay one fsynced node page @page (found at @blkaddr) for @inode:
 * recover xattrs, inline data, and finally the data block addresses of
 * the corresponding dnode.
 *
 * For each index, the on-disk address in @page ("dest") is compared with
 * the current address in the live dnode ("src") and the live dnode is
 * updated to match: stale blocks are truncated, reserved blocks are
 * re-reserved, and valid blocks are replayed via f2fs_replace_block().
 *
 * Returns 0 on success or a negative errno.
 */
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		/*
		 * Deprecated; xattr blocks should be found from cold log.
		 * But, we should remain this for backward compatibility.
		 */
		recover_xattr_data(inode, page, blkaddr);
		goto out;
	}

	/* step 2: recover inline data */
	if (recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);

	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err)
		goto out;

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	get_node_info(sbi, dn.nid, &ni);
	/* the live dnode must describe the same inode/offset as @page */
	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			continue;
		}

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			err = reserve_new_block(&dn);
			f2fs_bug_on(sbi, err);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
			}

			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err)
				goto err;

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	if (IS_INODE(dn.node_page))
		sync_inode_page(&dn);

	/* make the live dnode's footer match the replayed page */
	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx, recovered = %d blocks, err = %d",
		inode->i_ino, recovered, err);
	return err;
}
|
|
|
|
|
2015-12-01 03:43:59 +00:00
|
|
|
/*
 * Second recovery pass: rewalk the warm-node log and, for each dnode
 * belonging to an inode collected on @head, replay its inode metadata,
 * directory entry, and data blocks.
 *
 * The walk terminates on the first invalid block address or on a node
 * page from an older checkpoint version.  Once an entry's last logged
 * block has been processed, the entry is released.
 *
 * Returns 0 on success or a negative errno.
 */
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		ra_meta_pages_cond(sbi, blkaddr);

		page = get_tmp_page(sbi, blkaddr);

		/* end of the post-checkpoint log */
		if (cp_ver != cpver_of_node(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (entry->last_inode == blkaddr)
			recover_inode(entry->inode, page);
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page, blkaddr);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		/* this was the entry's last logged block: retire it */
		if (entry->blkaddr == blkaddr) {
			iput(entry->inode);
			list_del(&entry->list);
			kmem_cache_free(fsync_entry_slab, entry);
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		allocate_new_segments(sbi);
	return err;
}
|
|
|
|
|
2013-03-20 10:01:06 +00:00
|
|
|
int recover_fsync_data(struct f2fs_sb_info *sbi)
|
2012-11-02 08:13:32 +00:00
|
|
|
{
|
2014-07-25 22:47:25 +00:00
|
|
|
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
|
2012-11-02 08:13:32 +00:00
|
|
|
struct list_head inode_list;
|
2014-07-25 22:47:25 +00:00
|
|
|
block_t blkaddr;
|
2013-03-20 10:01:06 +00:00
|
|
|
int err;
|
2013-10-23 04:39:32 +00:00
|
|
|
bool need_writecp = false;
|
2012-11-02 08:13:32 +00:00
|
|
|
|
|
|
|
fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
|
2014-03-07 10:43:28 +00:00
|
|
|
sizeof(struct fsync_inode_entry));
|
2013-12-06 06:00:58 +00:00
|
|
|
if (!fsync_entry_slab)
|
2013-03-20 10:01:06 +00:00
|
|
|
return -ENOMEM;
|
2012-11-02 08:13:32 +00:00
|
|
|
|
|
|
|
INIT_LIST_HEAD(&inode_list);
|
|
|
|
|
2014-08-13 23:30:46 +00:00
|
|
|
/* prevent checkpoint */
|
|
|
|
mutex_lock(&sbi->cp_mutex);
|
|
|
|
|
2014-07-25 22:47:25 +00:00
|
|
|
blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
|
|
|
|
|
2015-08-11 19:45:39 +00:00
|
|
|
/* step #1: find fsynced inode numbers */
|
2013-03-20 10:01:06 +00:00
|
|
|
err = find_fsync_dnodes(sbi, &inode_list);
|
|
|
|
if (err)
|
2012-11-02 08:13:32 +00:00
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (list_empty(&inode_list))
|
|
|
|
goto out;
|
|
|
|
|
2013-10-23 04:39:32 +00:00
|
|
|
need_writecp = true;
|
2013-09-24 01:26:24 +00:00
|
|
|
|
2012-11-02 08:13:32 +00:00
|
|
|
/* step #2: recover data */
|
2015-12-01 03:43:59 +00:00
|
|
|
err = recover_data(sbi, &inode_list);
|
2014-08-08 17:18:43 +00:00
|
|
|
if (!err)
|
2014-09-02 22:52:58 +00:00
|
|
|
f2fs_bug_on(sbi, !list_empty(&inode_list));
|
2012-11-02 08:13:32 +00:00
|
|
|
out:
|
2013-06-27 01:28:54 +00:00
|
|
|
destroy_fsync_dnodes(&inode_list);
|
2012-11-02 08:13:32 +00:00
|
|
|
kmem_cache_destroy(fsync_entry_slab);
|
2014-07-25 22:47:25 +00:00
|
|
|
|
2014-09-11 20:49:55 +00:00
|
|
|
/* truncate meta pages to be used by the recovery */
|
|
|
|
truncate_inode_pages_range(META_MAPPING(sbi),
|
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced *long* time
ago with promise that one day it will be possible to implement page
cache with bigger chunks than PAGE_SIZE.
This promise never materialized. And unlikely will.
We have many places where PAGE_CACHE_SIZE assumed to be equal to
PAGE_SIZE. And it's constant source of confusion on whether
PAGE_CACHE_* or PAGE_* constant should be used in a particular case,
especially on the border between fs and mm.
Global switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause to much
breakage to be doable.
Let's stop pretending that pages in page cache are special. They are
not.
The changes are pretty straight-forward:
- <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
- page_cache_get() -> get_page();
- page_cache_release() -> put_page();
This patch contains automated changes generated with coccinelle using
script below. For some reason, coccinelle doesn't patch header files.
I've called spatch for them manually.
The only adjustment after coccinelle is revert of changes to
PAGE_CAHCE_ALIGN definition: we are going to drop it later.
There are few places in the code where coccinelle didn't reach. I'll
fix them manually in a separate patch. Comments and documentation also
will be addressed with the separate patch.
virtual patch
@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT
@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE
@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK
@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)
@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)
@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-04-01 12:29:47 +00:00
|
|
|
(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);
|
2014-09-11 20:49:55 +00:00
|
|
|
|
2014-07-25 22:47:25 +00:00
|
|
|
if (err) {
|
|
|
|
truncate_inode_pages_final(NODE_MAPPING(sbi));
|
|
|
|
truncate_inode_pages_final(META_MAPPING(sbi));
|
|
|
|
}
|
|
|
|
|
2015-01-28 09:48:42 +00:00
|
|
|
clear_sbi_flag(sbi, SBI_POR_DOING);
|
2014-07-25 22:47:25 +00:00
|
|
|
if (err) {
|
2015-07-28 10:36:47 +00:00
|
|
|
bool invalidate = false;
|
|
|
|
|
|
|
|
if (discard_next_dnode(sbi, blkaddr))
|
|
|
|
invalidate = true;
|
2014-07-25 22:47:25 +00:00
|
|
|
|
|
|
|
/* Flush all the NAT/SIT pages */
|
|
|
|
while (get_pages(sbi, F2FS_DIRTY_META))
|
|
|
|
sync_meta_pages(sbi, META, LONG_MAX);
|
2015-07-28 10:36:47 +00:00
|
|
|
|
|
|
|
/* invalidate temporary meta page */
|
|
|
|
if (invalidate)
|
|
|
|
invalidate_mapping_pages(META_MAPPING(sbi),
|
|
|
|
blkaddr, blkaddr);
|
|
|
|
|
2014-08-13 23:30:46 +00:00
|
|
|
set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
|
|
|
|
mutex_unlock(&sbi->cp_mutex);
|
2014-07-25 22:47:25 +00:00
|
|
|
} else if (need_writecp) {
|
2014-09-21 04:57:51 +00:00
|
|
|
struct cp_control cpc = {
|
2015-04-10 00:03:53 +00:00
|
|
|
.reason = CP_RECOVERY,
|
2014-09-21 04:57:51 +00:00
|
|
|
};
|
2014-08-13 23:30:46 +00:00
|
|
|
mutex_unlock(&sbi->cp_mutex);
|
2015-12-23 09:50:30 +00:00
|
|
|
err = write_checkpoint(sbi, &cpc);
|
2014-08-13 23:30:46 +00:00
|
|
|
} else {
|
|
|
|
mutex_unlock(&sbi->cp_mutex);
|
2014-07-25 22:47:25 +00:00
|
|
|
}
|
2013-03-20 10:01:06 +00:00
|
|
|
return err;
|
2012-11-02 08:13:32 +00:00
|
|
|
}
|