firmware/br-ext-chip-allwinner/board/v83x/kernel/patches/00000-fs_f2fs_super.c.patch

diff -drupN a/fs/f2fs/super.c b/fs/f2fs/super.c
--- a/fs/f2fs/super.c 2018-08-06 17:23:04.000000000 +0300
+++ b/fs/f2fs/super.c 2022-06-12 05:28:14.000000000 +0300
@@ -22,8 +22,10 @@
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
+#include <linux/quotaops.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>
+#include <linux/quota.h>
#include "f2fs.h"
#include "node.h"
@@ -35,20 +37,22 @@
#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>
-static struct proc_dir_entry *f2fs_proc_root;
static struct kmem_cache *f2fs_inode_cachep;
-static struct kset *f2fs_kset;
#ifdef CONFIG_F2FS_FAULT_INJECTION
char *fault_name[FAULT_MAX] = {
[FAULT_KMALLOC] = "kmalloc",
+ [FAULT_KVMALLOC] = "kvmalloc",
[FAULT_PAGE_ALLOC] = "page alloc",
+ [FAULT_PAGE_GET] = "page get",
+ [FAULT_ALLOC_BIO] = "alloc bio",
[FAULT_ALLOC_NID] = "alloc nid",
[FAULT_ORPHAN] = "orphan",
[FAULT_BLOCK] = "no more block",
[FAULT_DIR_DEPTH] = "too big dir depth",
[FAULT_EVICT_INODE] = "evict_inode fail",
+ [FAULT_TRUNCATE] = "truncate fail",
[FAULT_IO] = "IO error",
[FAULT_CHECKPOINT] = "checkpoint error",
};
@@ -56,7 +60,7 @@ char *fault_name[FAULT_MAX] = {
static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
unsigned int rate)
{
- struct f2fs_fault_info *ffi = &sbi->fault_info;
+ struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
if (rate) {
atomic_set(&ffi->inject_ops, 0);
@@ -82,6 +86,7 @@ enum {
Opt_discard,
Opt_nodiscard,
Opt_noheap,
+ Opt_heap,
Opt_user_xattr,
Opt_nouser_xattr,
Opt_acl,
@@ -89,6 +94,8 @@ enum {
Opt_active_logs,
Opt_disable_ext_identify,
Opt_inline_xattr,
+ Opt_noinline_xattr,
+ Opt_inline_xattr_size,
Opt_inline_data,
Opt_inline_dentry,
Opt_noinline_dentry,
@@ -100,10 +107,32 @@ enum {
Opt_noextent_cache,
Opt_noinline_data,
Opt_data_flush,
+ Opt_reserve_root,
+ Opt_resgid,
+ Opt_resuid,
Opt_mode,
+ Opt_io_size_bits,
Opt_fault_injection,
Opt_lazytime,
Opt_nolazytime,
+ Opt_quota,
+ Opt_noquota,
+ Opt_usrquota,
+ Opt_grpquota,
+ Opt_prjquota,
+ Opt_usrjquota,
+ Opt_grpjquota,
+ Opt_prjjquota,
+ Opt_offusrjquota,
+ Opt_offgrpjquota,
+ Opt_offprjjquota,
+ Opt_jqfmt_vfsold,
+ Opt_jqfmt_vfsv0,
+ Opt_jqfmt_vfsv1,
+ Opt_whint,
+ Opt_alloc,
+ Opt_fsync,
+ Opt_test_dummy_encryption,
Opt_err,
};
@@ -114,6 +143,7 @@ static match_table_t f2fs_tokens = {
{Opt_discard, "discard"},
{Opt_nodiscard, "nodiscard"},
{Opt_noheap, "no_heap"},
+ {Opt_heap, "heap"},
{Opt_user_xattr, "user_xattr"},
{Opt_nouser_xattr, "nouser_xattr"},
{Opt_acl, "acl"},
@@ -121,6 +151,8 @@ static match_table_t f2fs_tokens = {
{Opt_active_logs, "active_logs=%u"},
{Opt_disable_ext_identify, "disable_ext_identify"},
{Opt_inline_xattr, "inline_xattr"},
+ {Opt_noinline_xattr, "noinline_xattr"},
+ {Opt_inline_xattr_size, "inline_xattr_size=%u"},
{Opt_inline_data, "inline_data"},
{Opt_inline_dentry, "inline_dentry"},
{Opt_noinline_dentry, "noinline_dentry"},
@@ -132,229 +164,198 @@ static match_table_t f2fs_tokens = {
{Opt_noextent_cache, "noextent_cache"},
{Opt_noinline_data, "noinline_data"},
{Opt_data_flush, "data_flush"},
+ {Opt_reserve_root, "reserve_root=%u"},
+ {Opt_resgid, "resgid=%u"},
+ {Opt_resuid, "resuid=%u"},
{Opt_mode, "mode=%s"},
+ {Opt_io_size_bits, "io_bits=%u"},
{Opt_fault_injection, "fault_injection=%u"},
{Opt_lazytime, "lazytime"},
{Opt_nolazytime, "nolazytime"},
+ {Opt_quota, "quota"},
+ {Opt_noquota, "noquota"},
+ {Opt_usrquota, "usrquota"},
+ {Opt_grpquota, "grpquota"},
+ {Opt_prjquota, "prjquota"},
+ {Opt_usrjquota, "usrjquota=%s"},
+ {Opt_grpjquota, "grpjquota=%s"},
+ {Opt_prjjquota, "prjjquota=%s"},
+ {Opt_offusrjquota, "usrjquota="},
+ {Opt_offgrpjquota, "grpjquota="},
+ {Opt_offprjjquota, "prjjquota="},
+ {Opt_jqfmt_vfsold, "jqfmt=vfsold"},
+ {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
+ {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
+ {Opt_whint, "whint_mode=%s"},
+ {Opt_alloc, "alloc_mode=%s"},
+ {Opt_fsync, "fsync_mode=%s"},
+ {Opt_test_dummy_encryption, "test_dummy_encryption"},
{Opt_err, NULL},
};
-/* Sysfs support for f2fs */
-enum {
- GC_THREAD, /* struct f2fs_gc_thread */
- SM_INFO, /* struct f2fs_sm_info */
- NM_INFO, /* struct f2fs_nm_info */
- F2FS_SBI, /* struct f2fs_sb_info */
-#ifdef CONFIG_F2FS_FAULT_INJECTION
- FAULT_INFO_RATE, /* struct f2fs_fault_info */
- FAULT_INFO_TYPE, /* struct f2fs_fault_info */
-#endif
-};
-
-struct f2fs_attr {
- struct attribute attr;
- ssize_t (*show)(struct f2fs_attr *, struct f2fs_sb_info *, char *);
- ssize_t (*store)(struct f2fs_attr *, struct f2fs_sb_info *,
- const char *, size_t);
- int struct_type;
- int offset;
-};
-
-static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
+void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
- if (struct_type == GC_THREAD)
- return (unsigned char *)sbi->gc_thread;
- else if (struct_type == SM_INFO)
- return (unsigned char *)SM_I(sbi);
- else if (struct_type == NM_INFO)
- return (unsigned char *)NM_I(sbi);
- else if (struct_type == F2FS_SBI)
- return (unsigned char *)sbi;
-#ifdef CONFIG_F2FS_FAULT_INJECTION
- else if (struct_type == FAULT_INFO_RATE ||
- struct_type == FAULT_INFO_TYPE)
- return (unsigned char *)&sbi->fault_info;
-#endif
- return NULL;
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ printk_ratelimited("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
+ va_end(args);
}
-static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
- struct f2fs_sb_info *sbi, char *buf)
+static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
{
- struct super_block *sb = sbi->sb;
-
- if (!sb->s_bdev->bd_part)
- return snprintf(buf, PAGE_SIZE, "0\n");
+ block_t limit = (sbi->user_block_count << 1) / 1000;
- return snprintf(buf, PAGE_SIZE, "%llu\n",
- (unsigned long long)(sbi->kbytes_written +
- BD_PART_WRITTEN(sbi)));
+ /* limit is 0.2% */
+ if (test_opt(sbi, RESERVE_ROOT) &&
+ F2FS_OPTION(sbi).root_reserved_blocks > limit) {
+ F2FS_OPTION(sbi).root_reserved_blocks = limit;
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "Reduce reserved blocks for root = %u",
+ F2FS_OPTION(sbi).root_reserved_blocks);
+ }
+ if (!test_opt(sbi, RESERVE_ROOT) &&
+ (!uid_eq(F2FS_OPTION(sbi).s_resuid,
+ make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
+ !gid_eq(F2FS_OPTION(sbi).s_resgid,
+ make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
+ from_kuid_munged(&init_user_ns,
+ F2FS_OPTION(sbi).s_resuid),
+ from_kgid_munged(&init_user_ns,
+ F2FS_OPTION(sbi).s_resgid));
}
-static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
- struct f2fs_sb_info *sbi, char *buf)
+static void init_once(void *foo)
{
- unsigned char *ptr = NULL;
- unsigned int *ui;
-
- ptr = __struct_ptr(sbi, a->struct_type);
- if (!ptr)
- return -EINVAL;
-
- ui = (unsigned int *)(ptr + a->offset);
+ struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;
- return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
+ inode_init_once(&fi->vfs_inode);
}
-static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
- struct f2fs_sb_info *sbi,
- const char *buf, size_t count)
+#ifdef CONFIG_QUOTA
+static const char * const quotatypes[] = INITQFNAMES;
+#define QTYPE2NAME(t) (quotatypes[t])
+static int f2fs_set_qf_name(struct super_block *sb, int qtype,
+ substring_t *args)
{
- unsigned char *ptr;
- unsigned long t;
- unsigned int *ui;
- ssize_t ret;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ char *qname;
+ int ret = -EINVAL;
- ptr = __struct_ptr(sbi, a->struct_type);
- if (!ptr)
+ if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
+ f2fs_msg(sb, KERN_ERR,
+ "Cannot change journaled "
+ "quota options when quota turned on");
return -EINVAL;
+ }
+ if (f2fs_sb_has_quota_ino(sb)) {
+ f2fs_msg(sb, KERN_INFO,
+ "QUOTA feature is enabled, so ignore qf_name");
+ return 0;
+ }
- ui = (unsigned int *)(ptr + a->offset);
-
- ret = kstrtoul(skip_spaces(buf), 0, &t);
- if (ret < 0)
- return ret;
-#ifdef CONFIG_F2FS_FAULT_INJECTION
- if (a->struct_type == FAULT_INFO_TYPE && t >= (1 << FAULT_MAX))
+ qname = match_strdup(args);
+ if (!qname) {
+ f2fs_msg(sb, KERN_ERR,
+ "Not enough memory for storing quotafile name");
return -EINVAL;
-#endif
- *ui = t;
- return count;
-}
-
-static ssize_t f2fs_attr_show(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
- s_kobj);
- struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);
-
- return a->show ? a->show(a, sbi, buf) : 0;
+ }
+ if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
+ if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
+ ret = 0;
+ else
+ f2fs_msg(sb, KERN_ERR,
+ "%s quota file already specified",
+ QTYPE2NAME(qtype));
+ goto errout;
+ }
+ if (strchr(qname, '/')) {
+ f2fs_msg(sb, KERN_ERR,
+ "quotafile must be on filesystem root");
+ goto errout;
+ }
+ F2FS_OPTION(sbi).s_qf_names[qtype] = qname;
+ set_opt(sbi, QUOTA);
+ return 0;
+errout:
+ kfree(qname);
+ return ret;
}
-static ssize_t f2fs_attr_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t len)
+static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
{
- struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
- s_kobj);
- struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
- return a->store ? a->store(a, sbi, buf, len) : 0;
+ if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
+ f2fs_msg(sb, KERN_ERR, "Cannot change journaled quota options"
+ " when quota turned on");
+ return -EINVAL;
+ }
+ kfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
+ F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
+ return 0;
}
-static void f2fs_sb_release(struct kobject *kobj)
+static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
{
- struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
- s_kobj);
- complete(&sbi->s_kobj_unregister);
-}
-
-#define F2FS_ATTR_OFFSET(_struct_type, _name, _mode, _show, _store, _offset) \
-static struct f2fs_attr f2fs_attr_##_name = { \
- .attr = {.name = __stringify(_name), .mode = _mode }, \
- .show = _show, \
- .store = _store, \
- .struct_type = _struct_type, \
- .offset = _offset \
-}
-
-#define F2FS_RW_ATTR(struct_type, struct_name, name, elname) \
- F2FS_ATTR_OFFSET(struct_type, name, 0644, \
- f2fs_sbi_show, f2fs_sbi_store, \
- offsetof(struct struct_name, elname))
-
-#define F2FS_GENERAL_RO_ATTR(name) \
-static struct f2fs_attr f2fs_attr_##name = __ATTR(name, 0444, name##_show, NULL)
-
-F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time);
-F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_max_sleep_time, max_sleep_time);
-F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
-F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_idle, gc_idle);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, max_small_discards, max_discards);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, batched_trim_sections, trim_sections);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
-F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
-F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages);
-F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, dirty_nats_ratio, dirty_nats_ratio);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
-#ifdef CONFIG_F2FS_FAULT_INJECTION
-F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate);
-F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type);
-#endif
-F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);
-
-#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
-static struct attribute *f2fs_attrs[] = {
- ATTR_LIST(gc_min_sleep_time),
- ATTR_LIST(gc_max_sleep_time),
- ATTR_LIST(gc_no_gc_sleep_time),
- ATTR_LIST(gc_idle),
- ATTR_LIST(reclaim_segments),
- ATTR_LIST(max_small_discards),
- ATTR_LIST(batched_trim_sections),
- ATTR_LIST(ipu_policy),
- ATTR_LIST(min_ipu_util),
- ATTR_LIST(min_fsync_blocks),
- ATTR_LIST(max_victim_search),
- ATTR_LIST(dir_level),
- ATTR_LIST(ram_thresh),
- ATTR_LIST(ra_nid_pages),
- ATTR_LIST(dirty_nats_ratio),
- ATTR_LIST(cp_interval),
- ATTR_LIST(idle_interval),
-#ifdef CONFIG_F2FS_FAULT_INJECTION
- ATTR_LIST(inject_rate),
- ATTR_LIST(inject_type),
-#endif
- ATTR_LIST(lifetime_write_kbytes),
- NULL,
-};
-
-static const struct sysfs_ops f2fs_attr_ops = {
- .show = f2fs_attr_show,
- .store = f2fs_attr_store,
-};
+ /*
+ * We do the test below only for project quotas. 'usrquota' and
+ * 'grpquota' mount options are allowed even without quota feature
+ * to support legacy quotas in quota files.
+ */
+ if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi->sb)) {
+ f2fs_msg(sbi->sb, KERN_ERR, "Project quota feature not enabled. "
+ "Cannot enable project quota enforcement.");
+ return -1;
+ }
+ if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
+ F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
+ F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) {
+ if (test_opt(sbi, USRQUOTA) &&
+ F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
+ clear_opt(sbi, USRQUOTA);
-static struct kobj_type f2fs_ktype = {
- .default_attrs = f2fs_attrs,
- .sysfs_ops = &f2fs_attr_ops,
- .release = f2fs_sb_release,
-};
+ if (test_opt(sbi, GRPQUOTA) &&
+ F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
+ clear_opt(sbi, GRPQUOTA);
-void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
-{
- struct va_format vaf;
- va_list args;
+ if (test_opt(sbi, PRJQUOTA) &&
+ F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
+ clear_opt(sbi, PRJQUOTA);
- va_start(args, fmt);
- vaf.fmt = fmt;
- vaf.va = &args;
- printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
- va_end(args);
-}
+ if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
+ test_opt(sbi, PRJQUOTA)) {
+ f2fs_msg(sbi->sb, KERN_ERR, "old and new quota "
+ "format mixing");
+ return -1;
+ }
-static void init_once(void *foo)
-{
- struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;
+ if (!F2FS_OPTION(sbi).s_jquota_fmt) {
+ f2fs_msg(sbi->sb, KERN_ERR, "journaled quota format "
+ "not specified");
+ return -1;
+ }
+ }
- inode_init_once(&fi->vfs_inode);
+ if (f2fs_sb_has_quota_ino(sbi->sb) && F2FS_OPTION(sbi).s_jquota_fmt) {
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "QUOTA feature is enabled, so ignore jquota_fmt");
+ F2FS_OPTION(sbi).s_jquota_fmt = 0;
+ }
+ if (f2fs_sb_has_quota_ino(sbi->sb) && f2fs_readonly(sbi->sb)) {
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "Filesystem with quota feature cannot be mounted RDWR "
+ "without CONFIG_QUOTA");
+ return -1;
+ }
+ return 0;
}
+#endif
static int parse_options(struct super_block *sb, char *options)
{
@@ -363,6 +364,11 @@ static int parse_options(struct super_bl
substring_t args[MAX_OPT_ARGS];
char *p, *name;
int arg = 0;
+ kuid_t uid;
+ kgid_t gid;
+#ifdef CONFIG_QUOTA
+ int ret;
+#endif
if (!options)
return 0;
@@ -412,17 +418,26 @@ static int parse_options(struct super_bl
q = bdev_get_queue(sb->s_bdev);
if (blk_queue_discard(q)) {
set_opt(sbi, DISCARD);
- } else {
+ } else if (!f2fs_sb_has_blkzoned(sb)) {
f2fs_msg(sb, KERN_WARNING,
"mounting with \"discard\" option, but "
"the device does not support discard");
}
break;
case Opt_nodiscard:
+ if (f2fs_sb_has_blkzoned(sb)) {
+ f2fs_msg(sb, KERN_WARNING,
+ "discard is required for zoned block devices");
+ return -EINVAL;
+ }
clear_opt(sbi, DISCARD);
+ break;
case Opt_noheap:
set_opt(sbi, NOHEAP);
break;
+ case Opt_heap:
+ clear_opt(sbi, NOHEAP);
+ break;
#ifdef CONFIG_F2FS_FS_XATTR
case Opt_user_xattr:
set_opt(sbi, XATTR_USER);
@@ -433,6 +448,15 @@ static int parse_options(struct super_bl
case Opt_inline_xattr:
set_opt(sbi, INLINE_XATTR);
break;
+ case Opt_noinline_xattr:
+ clear_opt(sbi, INLINE_XATTR);
+ break;
+ case Opt_inline_xattr_size:
+ if (args->from && match_int(args, &arg))
+ return -EINVAL;
+ set_opt(sbi, INLINE_XATTR_SIZE);
+ F2FS_OPTION(sbi).inline_xattr_size = arg;
+ break;
#else
case Opt_user_xattr:
f2fs_msg(sb, KERN_INFO,
@@ -446,6 +470,10 @@ static int parse_options(struct super_bl
f2fs_msg(sb, KERN_INFO,
"inline_xattr options not supported");
break;
+ case Opt_noinline_xattr:
+ f2fs_msg(sb, KERN_INFO,
+ "noinline_xattr options not supported");
+ break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
case Opt_acl:
@@ -467,7 +495,7 @@ static int parse_options(struct super_bl
return -EINVAL;
if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
return -EINVAL;
- sbi->active_logs = arg;
+ F2FS_OPTION(sbi).active_logs = arg;
break;
case Opt_disable_ext_identify:
set_opt(sbi, DISABLE_EXT_IDENTIFY);
@@ -505,6 +533,40 @@ static int parse_options(struct super_bl
case Opt_data_flush:
set_opt(sbi, DATA_FLUSH);
break;
+ case Opt_reserve_root:
+ if (args->from && match_int(args, &arg))
+ return -EINVAL;
+ if (test_opt(sbi, RESERVE_ROOT)) {
+ f2fs_msg(sb, KERN_INFO,
+ "Preserve previous reserve_root=%u",
+ F2FS_OPTION(sbi).root_reserved_blocks);
+ } else {
+ F2FS_OPTION(sbi).root_reserved_blocks = arg;
+ set_opt(sbi, RESERVE_ROOT);
+ }
+ break;
+ case Opt_resuid:
+ if (args->from && match_int(args, &arg))
+ return -EINVAL;
+ uid = make_kuid(current_user_ns(), arg);
+ if (!uid_valid(uid)) {
+ f2fs_msg(sb, KERN_ERR,
+ "Invalid uid value %d", arg);
+ return -EINVAL;
+ }
+ F2FS_OPTION(sbi).s_resuid = uid;
+ break;
+ case Opt_resgid:
+ if (args->from && match_int(args, &arg))
+ return -EINVAL;
+ gid = make_kgid(current_user_ns(), arg);
+ if (!gid_valid(gid)) {
+ f2fs_msg(sb, KERN_ERR,
+ "Invalid gid value %d", arg);
+ return -EINVAL;
+ }
+ F2FS_OPTION(sbi).s_resgid = gid;
+ break;
case Opt_mode:
name = match_strdup(&args[0]);
@@ -512,6 +574,13 @@ static int parse_options(struct super_bl
return -ENOMEM;
if (strlen(name) == 8 &&
!strncmp(name, "adaptive", 8)) {
+ if (f2fs_sb_has_blkzoned(sb)) {
+ f2fs_msg(sb, KERN_WARNING,
+ "adaptive mode is not allowed with "
+ "zoned block device feature");
+ kfree(name);
+ return -EINVAL;
+ }
set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
} else if (strlen(name) == 3 &&
!strncmp(name, "lfs", 3)) {
@@ -522,11 +591,23 @@ static int parse_options(struct super_bl
}
kfree(name);
break;
+ case Opt_io_size_bits:
+ if (args->from && match_int(args, &arg))
+ return -EINVAL;
+ if (arg > __ilog2_u32(BIO_MAX_PAGES)) {
+ f2fs_msg(sb, KERN_WARNING,
+ "Not support %d, larger than %d",
+ 1 << arg, BIO_MAX_PAGES);
+ return -EINVAL;
+ }
+ F2FS_OPTION(sbi).write_io_size_bits = arg;
+ break;
case Opt_fault_injection:
if (args->from && match_int(args, &arg))
return -EINVAL;
#ifdef CONFIG_F2FS_FAULT_INJECTION
f2fs_build_fault_attr(sbi, arg);
+ set_opt(sbi, FAULT_INJECTION);
#else
f2fs_msg(sb, KERN_INFO,
"FAULT_INJECTION was not selected");
@@ -538,6 +619,152 @@ static int parse_options(struct super_bl
case Opt_nolazytime:
sb->s_flags &= ~MS_LAZYTIME;
break;
+#ifdef CONFIG_QUOTA
+ case Opt_quota:
+ case Opt_usrquota:
+ set_opt(sbi, USRQUOTA);
+ break;
+ case Opt_grpquota:
+ set_opt(sbi, GRPQUOTA);
+ break;
+ case Opt_prjquota:
+ set_opt(sbi, PRJQUOTA);
+ break;
+ case Opt_usrjquota:
+ ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
+ if (ret)
+ return ret;
+ break;
+ case Opt_grpjquota:
+ ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
+ if (ret)
+ return ret;
+ break;
+ case Opt_prjjquota:
+ ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
+ if (ret)
+ return ret;
+ break;
+ case Opt_offusrjquota:
+ ret = f2fs_clear_qf_name(sb, USRQUOTA);
+ if (ret)
+ return ret;
+ break;
+ case Opt_offgrpjquota:
+ ret = f2fs_clear_qf_name(sb, GRPQUOTA);
+ if (ret)
+ return ret;
+ break;
+ case Opt_offprjjquota:
+ ret = f2fs_clear_qf_name(sb, PRJQUOTA);
+ if (ret)
+ return ret;
+ break;
+ case Opt_jqfmt_vfsold:
+ F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD;
+ break;
+ case Opt_jqfmt_vfsv0:
+ F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0;
+ break;
+ case Opt_jqfmt_vfsv1:
+ F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1;
+ break;
+ case Opt_noquota:
+ clear_opt(sbi, QUOTA);
+ clear_opt(sbi, USRQUOTA);
+ clear_opt(sbi, GRPQUOTA);
+ clear_opt(sbi, PRJQUOTA);
+ break;
+#else
+ case Opt_quota:
+ case Opt_usrquota:
+ case Opt_grpquota:
+ case Opt_prjquota:
+ case Opt_usrjquota:
+ case Opt_grpjquota:
+ case Opt_prjjquota:
+ case Opt_offusrjquota:
+ case Opt_offgrpjquota:
+ case Opt_offprjjquota:
+ case Opt_jqfmt_vfsold:
+ case Opt_jqfmt_vfsv0:
+ case Opt_jqfmt_vfsv1:
+ case Opt_noquota:
+ f2fs_msg(sb, KERN_INFO,
+ "quota operations not supported");
+ break;
+#endif
+ case Opt_whint:
+ name = match_strdup(&args[0]);
+ if (!name)
+ return -ENOMEM;
+ if (strlen(name) == 10 &&
+ !strncmp(name, "user-based", 10)) {
+ F2FS_OPTION(sbi).whint_mode = WHINT_MODE_USER;
+ } else if (strlen(name) == 3 &&
+ !strncmp(name, "off", 3)) {
+ F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
+ } else if (strlen(name) == 8 &&
+ !strncmp(name, "fs-based", 8)) {
+ F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS;
+ } else {
+ kfree(name);
+ return -EINVAL;
+ }
+ kfree(name);
+ break;
+ case Opt_alloc:
+ name = match_strdup(&args[0]);
+ if (!name)
+ return -ENOMEM;
+
+ if (strlen(name) == 7 &&
+ !strncmp(name, "default", 7)) {
+ F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
+ } else if (strlen(name) == 5 &&
+ !strncmp(name, "reuse", 5)) {
+ F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
+ } else {
+ kfree(name);
+ return -EINVAL;
+ }
+ kfree(name);
+ break;
+ case Opt_fsync:
+ name = match_strdup(&args[0]);
+ if (!name)
+ return -ENOMEM;
+ if (strlen(name) == 5 &&
+ !strncmp(name, "posix", 5)) {
+ F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
+ } else if (strlen(name) == 6 &&
+ !strncmp(name, "strict", 6)) {
+ F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
+ } else if (strlen(name) == 9 &&
+ !strncmp(name, "nobarrier", 9)) {
+ F2FS_OPTION(sbi).fsync_mode =
+ FSYNC_MODE_NOBARRIER;
+ } else {
+ kfree(name);
+ return -EINVAL;
+ }
+ kfree(name);
+ break;
+ case Opt_test_dummy_encryption:
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+ if (!f2fs_sb_has_encrypt(sb)) {
+ f2fs_msg(sb, KERN_ERR, "Encrypt feature is off");
+ return -EINVAL;
+ }
+
+ F2FS_OPTION(sbi).test_dummy_encryption = true;
+ f2fs_msg(sb, KERN_INFO,
+ "Test dummy encryption mode enabled");
+#else
+ f2fs_msg(sb, KERN_INFO,
+ "Test dummy encryption mount option ignored");
+#endif
+ break;
default:
f2fs_msg(sb, KERN_ERR,
"Unrecognized mount option \"%s\" or missing value",
@@ -545,6 +772,49 @@ static int parse_options(struct super_bl
return -EINVAL;
}
}
+#ifdef CONFIG_QUOTA
+ if (f2fs_check_quota_options(sbi))
+ return -EINVAL;
+#endif
+
+ if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
+ f2fs_msg(sb, KERN_ERR,
+ "Should set mode=lfs with %uKB-sized IO",
+ F2FS_IO_SIZE_KB(sbi));
+ return -EINVAL;
+ }
+
+ if (test_opt(sbi, INLINE_XATTR_SIZE)) {
+ if (!f2fs_sb_has_extra_attr(sb) ||
+ !f2fs_sb_has_flexible_inline_xattr(sb)) {
+ f2fs_msg(sb, KERN_ERR,
+ "extra_attr or flexible_inline_xattr "
+ "feature is off");
+ return -EINVAL;
+ }
+ if (!test_opt(sbi, INLINE_XATTR)) {
+ f2fs_msg(sb, KERN_ERR,
+ "inline_xattr_size option should be "
+ "set with inline_xattr option");
+ return -EINVAL;
+ }
+ if (!F2FS_OPTION(sbi).inline_xattr_size ||
+ F2FS_OPTION(sbi).inline_xattr_size >=
+ DEF_ADDRS_PER_INODE -
+ F2FS_TOTAL_EXTRA_ATTR_SIZE -
+ DEF_INLINE_RESERVED_SIZE -
+ DEF_MIN_INLINE_SIZE) {
+ f2fs_msg(sb, KERN_ERR,
+ "inline xattr size is out of range");
+ return -EINVAL;
+ }
+ }
+
+ /* Not pass down write hints if the number of active logs is lesser
+ * than NR_CURSEG_TYPE.
+ */
+ if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_TYPE)
+ F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
return 0;
}
@@ -559,25 +829,28 @@ static struct inode *f2fs_alloc_inode(st
init_once((void *) fi);
/* Initialize f2fs-specific inode info */
- fi->vfs_inode.i_version = 1;
atomic_set(&fi->dirty_pages, 0);
fi->i_current_depth = 1;
- fi->i_advise = 0;
init_rwsem(&fi->i_sem);
INIT_LIST_HEAD(&fi->dirty_list);
INIT_LIST_HEAD(&fi->gdirty_list);
+ INIT_LIST_HEAD(&fi->inmem_ilist);
INIT_LIST_HEAD(&fi->inmem_pages);
mutex_init(&fi->inmem_lock);
init_rwsem(&fi->dio_rwsem[READ]);
init_rwsem(&fi->dio_rwsem[WRITE]);
+ init_rwsem(&fi->i_mmap_sem);
+ init_rwsem(&fi->i_xattr_sem);
/* Will be used by directory only */
fi->i_dir_level = F2FS_SB(sb)->dir_level;
+
return &fi->vfs_inode;
}
static int f2fs_drop_inode(struct inode *inode)
{
+ int ret;
/*
* This is to avoid a deadlock condition like below.
* writeback_single_inode(inode)
@@ -606,34 +879,36 @@ static int f2fs_drop_inode(struct inode
sb_end_intwrite(inode->i_sb);
- fscrypt_put_encryption_info(inode, NULL);
spin_lock(&inode->i_lock);
atomic_dec(&inode->i_count);
}
+ trace_f2fs_drop_inode(inode, 0);
return 0;
}
-
- return generic_drop_inode(inode);
+ ret = generic_drop_inode(inode);
+ trace_f2fs_drop_inode(inode, ret);
+ return ret;
}
-int f2fs_inode_dirtied(struct inode *inode)
+int f2fs_inode_dirtied(struct inode *inode, bool sync)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ int ret = 0;
spin_lock(&sbi->inode_lock[DIRTY_META]);
if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
- spin_unlock(&sbi->inode_lock[DIRTY_META]);
- return 1;
+ ret = 1;
+ } else {
+ set_inode_flag(inode, FI_DIRTY_INODE);
+ stat_inc_dirty_inode(sbi, DIRTY_META);
}
-
- set_inode_flag(inode, FI_DIRTY_INODE);
- list_add_tail(&F2FS_I(inode)->gdirty_list,
+ if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
+ list_add_tail(&F2FS_I(inode)->gdirty_list,
&sbi->inode_list[DIRTY_META]);
- inc_page_count(sbi, F2FS_DIRTY_IMETA);
- stat_inc_dirty_inode(sbi, DIRTY_META);
+ inc_page_count(sbi, F2FS_DIRTY_IMETA);
+ }
spin_unlock(&sbi->inode_lock[DIRTY_META]);
-
- return 0;
+ return ret;
}
void f2fs_inode_synced(struct inode *inode)
@@ -645,10 +920,12 @@ void f2fs_inode_synced(struct inode *ino
spin_unlock(&sbi->inode_lock[DIRTY_META]);
return;
}
- list_del_init(&F2FS_I(inode)->gdirty_list);
+ if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
+ list_del_init(&F2FS_I(inode)->gdirty_list);
+ dec_page_count(sbi, F2FS_DIRTY_IMETA);
+ }
clear_inode_flag(inode, FI_DIRTY_INODE);
clear_inode_flag(inode, FI_AUTO_RECOVER);
- dec_page_count(sbi, F2FS_DIRTY_IMETA);
stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
spin_unlock(&sbi->inode_lock[DIRTY_META]);
}
@@ -672,7 +949,7 @@ static void f2fs_dirty_inode(struct inod
if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
clear_inode_flag(inode, FI_AUTO_RECOVER);
- f2fs_inode_dirtied(inode);
+ f2fs_inode_dirtied(inode, false);
}
static void f2fs_i_callback(struct rcu_head *head)
@@ -692,18 +969,26 @@ static void destroy_percpu_info(struct f
percpu_counter_destroy(&sbi->total_valid_inode_count);
}
-static void f2fs_put_super(struct super_block *sb)
+static void destroy_device_list(struct f2fs_sb_info *sbi)
{
- struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ int i;
- if (sbi->s_proc) {
- remove_proc_entry("segment_info", sbi->s_proc);
- remove_proc_entry("segment_bits", sbi->s_proc);
- remove_proc_entry(sb->s_id, f2fs_proc_root);
+ for (i = 0; i < sbi->s_ndevs; i++) {
+ blkdev_put(FDEV(i).bdev, FMODE_EXCL);
+#ifdef CONFIG_BLK_DEV_ZONED
+ kfree(FDEV(i).blkz_type);
+#endif
}
- kobject_del(&sbi->s_kobj);
+ kfree(sbi->devs);
+}
- stop_gc_thread(sbi);
+static void f2fs_put_super(struct super_block *sb)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ int i;
+ bool dropped;
+
+ f2fs_quota_off_umount(sb);
/* prevent remaining shrinker jobs */
mutex_lock(&sbi->umount_mutex);
@@ -721,6 +1006,16 @@ static void f2fs_put_super(struct super_
write_checkpoint(sbi, &cpc);
}
+ /* be sure to wait for any on-going discard commands */
+ dropped = f2fs_wait_discard_bios(sbi);
+
+ if (f2fs_discard_en(sbi) && !sbi->discard_blks && !dropped) {
+ struct cp_control cpc = {
+ .reason = CP_UMOUNT | CP_TRIMMED,
+ };
+ write_checkpoint(sbi, &cpc);
+ }
+
/* write_checkpoint can update stat informaion */
f2fs_destroy_stats(sbi);
@@ -729,13 +1024,12 @@ static void f2fs_put_super(struct super_
* In addition, EIO will skip do checkpoint, we need this as well.
*/
release_ino_entry(sbi, true);
- release_discard_addrs(sbi);
f2fs_leave_shrinker(sbi);
mutex_unlock(&sbi->umount_mutex);
/* our cp_error case, we can wait for any writeback page */
- f2fs_flush_merged_bios(sbi);
+ f2fs_flush_merged_writes(sbi);
iput(sbi->node_inode);
iput(sbi->meta_inode);
@@ -745,15 +1039,23 @@ static void f2fs_put_super(struct super_
destroy_segment_manager(sbi);
kfree(sbi->ckpt);
- kobject_put(&sbi->s_kobj);
- wait_for_completion(&sbi->s_kobj_unregister);
+
+ f2fs_unregister_sysfs(sbi);
sb->s_fs_info = NULL;
if (sbi->s_chksum_driver)
crypto_free_shash(sbi->s_chksum_driver);
kfree(sbi->raw_super);
+ destroy_device_list(sbi);
+ mempool_destroy(sbi->write_io_dummy);
+#ifdef CONFIG_QUOTA
+ for (i = 0; i < MAXQUOTAS; i++)
+ kfree(F2FS_OPTION(sbi).s_qf_names[i]);
+#endif
destroy_percpu_info(sbi);
+ for (i = 0; i < NR_PAGE_TYPE; i++)
+ kfree(sbi->write_io[i]);
kfree(sbi);
}
@@ -762,8 +1064,14 @@ int f2fs_sync_fs(struct super_block *sb,
struct f2fs_sb_info *sbi = F2FS_SB(sb);
int err = 0;
+ if (unlikely(f2fs_cp_error(sbi)))
+ return 0;
+
trace_f2fs_sync_fs(sb, sync);
+ if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
+ return -EAGAIN;
+
if (sync) {
struct cp_control cpc;
@@ -780,13 +1088,17 @@ int f2fs_sync_fs(struct super_block *sb,
static int f2fs_freeze(struct super_block *sb)
{
- int err;
-
if (f2fs_readonly(sb))
return 0;
- err = f2fs_sync_fs(sb, 1);
- return err;
+ /* IO error happened before */
+ if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
+ return -EIO;
+
+ /* must be clean, since sync_filesystem() was already called */
+ if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
+ return -EINVAL;
+ return 0;
}
static int f2fs_unfreeze(struct super_block *sb)
@@ -794,34 +1106,133 @@ static int f2fs_unfreeze(struct super_bl
return 0;
}
+#ifdef CONFIG_QUOTA
+static int f2fs_statfs_project(struct super_block *sb,
+ kprojid_t projid, struct kstatfs *buf)
+{
+ struct kqid qid;
+ struct dquot *dquot;
+ u64 limit;
+ u64 curblock;
+
+ qid = make_kqid_projid(projid);
+ dquot = dqget(sb, qid);
+ if (IS_ERR(dquot))
+ return PTR_ERR(dquot);
+ spin_lock(&dq_data_lock);
+
+ limit = (dquot->dq_dqb.dqb_bsoftlimit ?
+ dquot->dq_dqb.dqb_bsoftlimit :
+ dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
+ if (limit && buf->f_blocks > limit) {
+ curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
+ buf->f_blocks = limit;
+ buf->f_bfree = buf->f_bavail =
+ (buf->f_blocks > curblock) ?
+ (buf->f_blocks - curblock) : 0;
+ }
+
+ limit = dquot->dq_dqb.dqb_isoftlimit ?
+ dquot->dq_dqb.dqb_isoftlimit :
+ dquot->dq_dqb.dqb_ihardlimit;
+ if (limit && buf->f_files > limit) {
+ buf->f_files = limit;
+ buf->f_ffree =
+ (buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
+ (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
+ }
+
+ spin_unlock(&dq_data_lock);
+ dqput(dquot);
+ return 0;
+}
+#endif
+
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
- block_t total_count, user_block_count, start_count, ovp_count;
+ block_t total_count, user_block_count, start_count;
+ u64 avail_node_count;
total_count = le64_to_cpu(sbi->raw_super->block_count);
user_block_count = sbi->user_block_count;
start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
- ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
buf->f_type = F2FS_SUPER_MAGIC;
buf->f_bsize = sbi->blocksize;
buf->f_blocks = total_count - start_count;
- buf->f_bfree = user_block_count - valid_user_blocks(sbi) + ovp_count;
- buf->f_bavail = user_block_count - valid_user_blocks(sbi);
+ buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
+ sbi->current_reserved_blocks;
+ if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
+ buf->f_bavail = buf->f_bfree -
+ F2FS_OPTION(sbi).root_reserved_blocks;
+ else
+ buf->f_bavail = 0;
- buf->f_files = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
- buf->f_ffree = buf->f_files - valid_inode_count(sbi);
+ avail_node_count = sbi->total_node_count - sbi->nquota_files -
+ F2FS_RESERVED_NODE_NUM;
+
+ if (avail_node_count > user_block_count) {
+ buf->f_files = user_block_count;
+ buf->f_ffree = buf->f_bavail;
+ } else {
+ buf->f_files = avail_node_count;
+ buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
+ buf->f_bavail);
+ }
buf->f_namelen = F2FS_NAME_LEN;
buf->f_fsid.val[0] = (u32)id;
buf->f_fsid.val[1] = (u32)(id >> 32);
+#ifdef CONFIG_QUOTA
+ if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
+ sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
+ f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
+ }
+#endif
return 0;
}
+static inline void f2fs_show_quota_options(struct seq_file *seq,
+ struct super_block *sb)
+{
+#ifdef CONFIG_QUOTA
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+
+ if (F2FS_OPTION(sbi).s_jquota_fmt) {
+ char *fmtname = "";
+
+ switch (F2FS_OPTION(sbi).s_jquota_fmt) {
+ case QFMT_VFS_OLD:
+ fmtname = "vfsold";
+ break;
+ case QFMT_VFS_V0:
+ fmtname = "vfsv0";
+ break;
+ case QFMT_VFS_V1:
+ fmtname = "vfsv1";
+ break;
+ }
+ seq_printf(seq, ",jqfmt=%s", fmtname);
+ }
+
+ if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
+ seq_show_option(seq, "usrjquota",
+ F2FS_OPTION(sbi).s_qf_names[USRQUOTA]);
+
+ if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
+ seq_show_option(seq, "grpjquota",
+ F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]);
+
+ if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
+ seq_show_option(seq, "prjjquota",
+ F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]);
+#endif
+}
+
static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);
@@ -839,7 +1250,9 @@ static int f2fs_show_options(struct seq_
if (test_opt(sbi, DISCARD))
seq_puts(seq, ",discard");
if (test_opt(sbi, NOHEAP))
- seq_puts(seq, ",no_heap_alloc");
+ seq_puts(seq, ",no_heap");
+ else
+ seq_puts(seq, ",heap");
#ifdef CONFIG_F2FS_FS_XATTR
if (test_opt(sbi, XATTR_USER))
seq_puts(seq, ",user_xattr");
@@ -847,6 +1260,11 @@ static int f2fs_show_options(struct seq_
seq_puts(seq, ",nouser_xattr");
if (test_opt(sbi, INLINE_XATTR))
seq_puts(seq, ",inline_xattr");
+ else
+ seq_puts(seq, ",noinline_xattr");
+ if (test_opt(sbi, INLINE_XATTR_SIZE))
+ seq_printf(seq, ",inline_xattr_size=%u",
+ F2FS_OPTION(sbi).inline_xattr_size);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
if (test_opt(sbi, POSIX_ACL))
@@ -882,90 +1300,73 @@ static int f2fs_show_options(struct seq_
seq_puts(seq, "adaptive");
else if (test_opt(sbi, LFS))
seq_puts(seq, "lfs");
- seq_printf(seq, ",active_logs=%u", sbi->active_logs);
-
- return 0;
-}
-
-static int segment_info_seq_show(struct seq_file *seq, void *offset)
-{
- struct super_block *sb = seq->private;
- struct f2fs_sb_info *sbi = F2FS_SB(sb);
- unsigned int total_segs =
- le32_to_cpu(sbi->raw_super->segment_count_main);
- int i;
-
- seq_puts(seq, "format: segment_type|valid_blocks\n"
- "segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");
-
- for (i = 0; i < total_segs; i++) {
- struct seg_entry *se = get_seg_entry(sbi, i);
-
- if ((i % 10) == 0)
- seq_printf(seq, "%-10d", i);
- seq_printf(seq, "%d|%-3u", se->type,
- get_valid_blocks(sbi, i, 1));
- if ((i % 10) == 9 || i == (total_segs - 1))
- seq_putc(seq, '\n');
- else
- seq_putc(seq, ' ');
- }
-
- return 0;
-}
-
-static int segment_bits_seq_show(struct seq_file *seq, void *offset)
-{
- struct super_block *sb = seq->private;
- struct f2fs_sb_info *sbi = F2FS_SB(sb);
- unsigned int total_segs =
- le32_to_cpu(sbi->raw_super->segment_count_main);
- int i, j;
-
- seq_puts(seq, "format: segment_type|valid_blocks|bitmaps\n"
- "segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");
+ seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
+ if (test_opt(sbi, RESERVE_ROOT))
+ seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
+ F2FS_OPTION(sbi).root_reserved_blocks,
+ from_kuid_munged(&init_user_ns,
+ F2FS_OPTION(sbi).s_resuid),
+ from_kgid_munged(&init_user_ns,
+ F2FS_OPTION(sbi).s_resgid));
+ if (F2FS_IO_SIZE_BITS(sbi))
+ seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi));
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (test_opt(sbi, FAULT_INJECTION))
+ seq_printf(seq, ",fault_injection=%u",
+ F2FS_OPTION(sbi).fault_info.inject_rate);
+#endif
+#ifdef CONFIG_QUOTA
+ if (test_opt(sbi, QUOTA))
+ seq_puts(seq, ",quota");
+ if (test_opt(sbi, USRQUOTA))
+ seq_puts(seq, ",usrquota");
+ if (test_opt(sbi, GRPQUOTA))
+ seq_puts(seq, ",grpquota");
+ if (test_opt(sbi, PRJQUOTA))
+ seq_puts(seq, ",prjquota");
+#endif
+ f2fs_show_quota_options(seq, sbi->sb);
+ if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER)
+ seq_printf(seq, ",whint_mode=%s", "user-based");
+ else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS)
+ seq_printf(seq, ",whint_mode=%s", "fs-based");
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+ if (F2FS_OPTION(sbi).test_dummy_encryption)
+ seq_puts(seq, ",test_dummy_encryption");
+#endif
- for (i = 0; i < total_segs; i++) {
- struct seg_entry *se = get_seg_entry(sbi, i);
+ if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
+ seq_printf(seq, ",alloc_mode=%s", "default");
+ else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
+ seq_printf(seq, ",alloc_mode=%s", "reuse");
- seq_printf(seq, "%-10d", i);
- seq_printf(seq, "%d|%-3u|", se->type,
- get_valid_blocks(sbi, i, 1));
- for (j = 0; j < SIT_VBLOCK_MAP_SIZE; j++)
- seq_printf(seq, " %.2x", se->cur_valid_map[j]);
- seq_putc(seq, '\n');
- }
+ if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
+ seq_printf(seq, ",fsync_mode=%s", "posix");
+ else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
+ seq_printf(seq, ",fsync_mode=%s", "strict");
return 0;
}
-#define F2FS_PROC_FILE_DEF(_name) \
-static int _name##_open_fs(struct inode *inode, struct file *file) \
-{ \
- return single_open(file, _name##_seq_show, PDE_DATA(inode)); \
-} \
- \
-static const struct file_operations f2fs_seq_##_name##_fops = { \
- .open = _name##_open_fs, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = single_release, \
-};
-
-F2FS_PROC_FILE_DEF(segment_info);
-F2FS_PROC_FILE_DEF(segment_bits);
-
static void default_options(struct f2fs_sb_info *sbi)
{
/* init some FS parameters */
- sbi->active_logs = NR_CURSEG_TYPE;
+ F2FS_OPTION(sbi).active_logs = NR_CURSEG_TYPE;
+ F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
+ F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
+ F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
+ F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
+ F2FS_OPTION(sbi).test_dummy_encryption = false;
+ sbi->readdir_ra = 1;
set_opt(sbi, BG_GC);
+ set_opt(sbi, INLINE_XATTR);
set_opt(sbi, INLINE_DATA);
set_opt(sbi, INLINE_DENTRY);
set_opt(sbi, EXTENT_CACHE);
+ set_opt(sbi, NOHEAP);
sbi->sb->s_flags |= MS_LAZYTIME;
set_opt(sbi, FLUSH_MERGE);
- if (f2fs_sb_mounted_hmsmr(sbi->sb)) {
+ if (f2fs_sb_has_blkzoned(sbi->sb)) {
set_opt_mode(sbi, F2FS_MOUNT_LFS);
set_opt(sbi, DISCARD);
} else {
@@ -984,16 +1385,20 @@ static void default_options(struct f2fs_
#endif
}
+#ifdef CONFIG_QUOTA
+static int f2fs_enable_quotas(struct super_block *sb);
+#endif
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
struct f2fs_mount_info org_mount_opt;
- int err, active_logs;
+ unsigned long old_sb_flags;
+ int err;
bool need_restart_gc = false;
bool need_stop_gc = false;
bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
-#ifdef CONFIG_F2FS_FAULT_INJECTION
- struct f2fs_fault_info ffi = sbi->fault_info;
+#ifdef CONFIG_QUOTA
+ int i, j;
#endif
/*
@@ -1001,7 +1406,25 @@ static int f2fs_remount(struct super_blo
* need to restore them.
*/
org_mount_opt = sbi->mount_opt;
- active_logs = sbi->active_logs;
+ old_sb_flags = sb->s_flags;
+
+#ifdef CONFIG_QUOTA
+ org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
+ for (i = 0; i < MAXQUOTAS; i++) {
+ if (F2FS_OPTION(sbi).s_qf_names[i]) {
+ org_mount_opt.s_qf_names[i] =
+ kstrdup(F2FS_OPTION(sbi).s_qf_names[i],
+ GFP_KERNEL);
+ if (!org_mount_opt.s_qf_names[i]) {
+ for (j = 0; j < i; j++)
+ kfree(org_mount_opt.s_qf_names[j]);
+ return -ENOMEM;
+ }
+ } else {
+ org_mount_opt.s_qf_names[i] = NULL;
+ }
+ }
+#endif
/* recover superblocks we couldn't write due to previous RO mount */
if (!(*flags & MS_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
@@ -1012,7 +1435,6 @@ static int f2fs_remount(struct super_blo
clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
}
- sbi->mount_opt.opt = 0;
default_options(sbi);
/* parse mount options */
@@ -1027,6 +1449,23 @@ static int f2fs_remount(struct super_blo
if (f2fs_readonly(sb) && (*flags & MS_RDONLY))
goto skip;
+#ifdef CONFIG_QUOTA
+ if (!f2fs_readonly(sb) && (*flags & MS_RDONLY)) {
+ err = dquot_suspend(sb, -1);
+ if (err < 0)
+ goto restore_opts;
+ } else if (f2fs_readonly(sb) && !(*flags & MS_RDONLY)) {
+ /* dquot_resume needs RW */
+ sb->s_flags &= ~MS_RDONLY;
+ if (sb_any_quota_suspended(sb)) {
+ dquot_resume(sb, -1);
+ } else if (f2fs_sb_has_quota_ino(sb)) {
+ err = f2fs_enable_quotas(sb);
+ if (err)
+ goto restore_opts;
+ }
+ }
+#endif
/* disallow enable/disable extent_cache dynamically */
if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
err = -EINVAL;
@@ -1052,7 +1491,8 @@ static int f2fs_remount(struct super_blo
need_stop_gc = true;
}
- if (*flags & MS_RDONLY) {
+ if (*flags & MS_RDONLY ||
+ F2FS_OPTION(sbi).whint_mode != org_mount_opt.whint_mode) {
writeback_inodes_sb(sb, WB_REASON_SYNC);
sync_inodes_sb(sb);
@@ -1067,17 +1507,24 @@ static int f2fs_remount(struct super_blo
* or if flush_merge is not passed in mount option.
*/
if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
- destroy_flush_cmd_control(sbi);
- } else if (!SM_I(sbi)->cmd_control_info) {
+ clear_opt(sbi, FLUSH_MERGE);
+ destroy_flush_cmd_control(sbi, false);
+ } else {
err = create_flush_cmd_control(sbi);
if (err)
goto restore_gc;
}
skip:
+#ifdef CONFIG_QUOTA
+ /* Release old quota file names */
+ for (i = 0; i < MAXQUOTAS; i++)
+ kfree(org_mount_opt.s_qf_names[i]);
+#endif
/* Update the POSIXACL Flag */
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
+ limit_reserve_root(sbi);
return 0;
restore_gc:
if (need_restart_gc) {
@@ -1088,21 +1535,363 @@ restore_gc:
stop_gc_thread(sbi);
}
restore_opts:
- sbi->mount_opt = org_mount_opt;
- sbi->active_logs = active_logs;
-#ifdef CONFIG_F2FS_FAULT_INJECTION
- sbi->fault_info = ffi;
+#ifdef CONFIG_QUOTA
+ F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
+ for (i = 0; i < MAXQUOTAS; i++) {
+ kfree(F2FS_OPTION(sbi).s_qf_names[i]);
+ F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
+ }
#endif
+ sbi->mount_opt = org_mount_opt;
+ sb->s_flags = old_sb_flags;
return err;
}
-static struct super_operations f2fs_sops = {
+#ifdef CONFIG_QUOTA
+/* Read data from quotafile */
+static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
+ size_t len, loff_t off)
+{
+ struct inode *inode = sb_dqopt(sb)->files[type];
+ struct address_space *mapping = inode->i_mapping;
+ block_t blkidx = F2FS_BYTES_TO_BLK(off);
+ int offset = off & (sb->s_blocksize - 1);
+ int tocopy;
+ size_t toread;
+ loff_t i_size = i_size_read(inode);
+ struct page *page;
+ char *kaddr;
+
+ if (off > i_size)
+ return 0;
+
+ if (off + len > i_size)
+ len = i_size - off;
+ toread = len;
+ while (toread > 0) {
+ tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
+repeat:
+ page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
+ if (IS_ERR(page)) {
+ if (PTR_ERR(page) == -ENOMEM) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto repeat;
+ }
+ return PTR_ERR(page);
+ }
+
+ lock_page(page);
+
+ if (unlikely(page->mapping != mapping)) {
+ f2fs_put_page(page, 1);
+ goto repeat;
+ }
+ if (unlikely(!PageUptodate(page))) {
+ f2fs_put_page(page, 1);
+ return -EIO;
+ }
+
+ kaddr = kmap_atomic(page);
+ memcpy(data, kaddr + offset, tocopy);
+ kunmap_atomic(kaddr);
+ f2fs_put_page(page, 1);
+
+ offset = 0;
+ toread -= tocopy;
+ data += tocopy;
+ blkidx++;
+ }
+ return len;
+}
+
+/* Write to quotafile */
+static ssize_t f2fs_quota_write(struct super_block *sb, int type,
+ const char *data, size_t len, loff_t off)
+{
+ struct inode *inode = sb_dqopt(sb)->files[type];
+ struct address_space *mapping = inode->i_mapping;
+ const struct address_space_operations *a_ops = mapping->a_ops;
+ int offset = off & (sb->s_blocksize - 1);
+ size_t towrite = len;
+ struct page *page;
+ char *kaddr;
+ int err = 0;
+ int tocopy;
+
+ while (towrite > 0) {
+ tocopy = min_t(unsigned long, sb->s_blocksize - offset,
+ towrite);
+retry:
+ err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
+ &page, NULL);
+ if (unlikely(err)) {
+ if (err == -ENOMEM) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto retry;
+ }
+ break;
+ }
+
+ kaddr = kmap_atomic(page);
+ memcpy(kaddr + offset, data, tocopy);
+ kunmap_atomic(kaddr);
+ flush_dcache_page(page);
+
+ a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
+ page, NULL);
+ offset = 0;
+ towrite -= tocopy;
+ off += tocopy;
+ data += tocopy;
+ cond_resched();
+ }
+
+ if (len == towrite)
+ return err;
+ inode->i_mtime = inode->i_ctime = current_time(inode);
+ f2fs_mark_inode_dirty_sync(inode, false);
+ return len - towrite;
+}
+
+static struct dquot **f2fs_get_dquots(struct inode *inode)
+{
+ return F2FS_I(inode)->i_dquot;
+}
+
+static qsize_t *f2fs_get_reserved_space(struct inode *inode)
+{
+ return &F2FS_I(inode)->i_reserved_quota;
+}
+
+static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
+{
+ return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
+ F2FS_OPTION(sbi).s_jquota_fmt, type);
+}
+
+int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
+{
+ int enabled = 0;
+ int i, err;
+
+ if (f2fs_sb_has_quota_ino(sbi->sb) && rdonly) {
+ err = f2fs_enable_quotas(sbi->sb);
+ if (err) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Cannot turn on quota_ino: %d", err);
+ return 0;
+ }
+ return 1;
+ }
+
+ for (i = 0; i < MAXQUOTAS; i++) {
+ if (F2FS_OPTION(sbi).s_qf_names[i]) {
+ err = f2fs_quota_on_mount(sbi, i);
+ if (!err) {
+ enabled = 1;
+ continue;
+ }
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Cannot turn on quotas: %d on %d", err, i);
+ }
+ }
+ return enabled;
+}
+
+static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
+ unsigned int flags)
+{
+ struct inode *qf_inode;
+ unsigned long qf_inum;
+ int err;
+
+ BUG_ON(!f2fs_sb_has_quota_ino(sb));
+
+ qf_inum = f2fs_qf_ino(sb, type);
+ if (!qf_inum)
+ return -EPERM;
+
+ qf_inode = f2fs_iget(sb, qf_inum);
+ if (IS_ERR(qf_inode)) {
+ f2fs_msg(sb, KERN_ERR,
+ "Bad quota inode %u:%lu", type, qf_inum);
+ return PTR_ERR(qf_inode);
+ }
+
+ /* Don't account quota for quota files to avoid recursion */
+ qf_inode->i_flags |= S_NOQUOTA;
+ err = dquot_enable(qf_inode, type, format_id, flags);
+ iput(qf_inode);
+ return err;
+}
+
+static int f2fs_enable_quotas(struct super_block *sb)
+{
+ int type, err = 0;
+ unsigned long qf_inum;
+ bool quota_mopt[MAXQUOTAS] = {
+ test_opt(F2FS_SB(sb), USRQUOTA),
+ test_opt(F2FS_SB(sb), GRPQUOTA),
+ test_opt(F2FS_SB(sb), PRJQUOTA),
+ };
+
+ sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
+ for (type = 0; type < MAXQUOTAS; type++) {
+ qf_inum = f2fs_qf_ino(sb, type);
+ if (qf_inum) {
+ err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
+ DQUOT_USAGE_ENABLED |
+ (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
+ if (err) {
+ f2fs_msg(sb, KERN_ERR,
+ "Failed to enable quota tracking "
+ "(type=%d, err=%d). Please run "
+ "fsck to fix.", type, err);
+ for (type--; type >= 0; type--)
+ dquot_quota_off(sb, type);
+ return err;
+ }
+ }
+ }
+ return 0;
+}
+
+static int f2fs_quota_sync(struct super_block *sb, int type)
+{
+ struct quota_info *dqopt = sb_dqopt(sb);
+ int cnt;
+ int ret;
+
+ ret = dquot_writeback_dquots(sb, type);
+ if (ret)
+ return ret;
+
+ /*
+ * Now when everything is written we can discard the pagecache so
+ * that userspace sees the changes.
+ */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ if (type != -1 && cnt != type)
+ continue;
+ if (!sb_has_quota_active(sb, cnt))
+ continue;
+
+ ret = filemap_write_and_wait(dqopt->files[cnt]->i_mapping);
+ if (ret)
+ return ret;
+
+ inode_lock(dqopt->files[cnt]);
+ truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
+ inode_unlock(dqopt->files[cnt]);
+ }
+ return 0;
+}
+
+static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
+ struct path *path)
+{
+ struct inode *inode;
+ int err;
+
+ err = f2fs_quota_sync(sb, type);
+ if (err)
+ return err;
+
+ err = dquot_quota_on(sb, type, format_id, path);
+ if (err)
+ return err;
+
+ inode = d_inode(path->dentry);
+
+ inode_lock(inode);
+ F2FS_I(inode)->i_flags |= FS_NOATIME_FL | FS_IMMUTABLE_FL;
+ inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
+ S_NOATIME | S_IMMUTABLE);
+ inode_unlock(inode);
+ f2fs_mark_inode_dirty_sync(inode, false);
+
+ return 0;
+}
+
+static int f2fs_quota_off(struct super_block *sb, int type)
+{
+ struct inode *inode = sb_dqopt(sb)->files[type];
+ int err;
+
+ if (!inode || !igrab(inode))
+ return dquot_quota_off(sb, type);
+
+ f2fs_quota_sync(sb, type);
+
+ err = dquot_quota_off(sb, type);
+ if (err || f2fs_sb_has_quota_ino(sb))
+ goto out_put;
+
+ inode_lock(inode);
+ F2FS_I(inode)->i_flags &= ~(FS_NOATIME_FL | FS_IMMUTABLE_FL);
+ inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
+ inode_unlock(inode);
+ f2fs_mark_inode_dirty_sync(inode, false);
+out_put:
+ iput(inode);
+ return err;
+}
+
+void f2fs_quota_off_umount(struct super_block *sb)
+{
+ int type;
+
+ for (type = 0; type < MAXQUOTAS; type++)
+ f2fs_quota_off(sb, type);
+}
+
+static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
+{
+ *projid = F2FS_I(inode)->i_projid;
+ return 0;
+}
+
+static const struct dquot_operations f2fs_quota_operations = {
+ .get_reserved_space = f2fs_get_reserved_space,
+ .write_dquot = dquot_commit,
+ .acquire_dquot = dquot_acquire,
+ .release_dquot = dquot_release,
+ .mark_dirty = dquot_mark_dquot_dirty,
+ .write_info = dquot_commit_info,
+ .alloc_dquot = dquot_alloc,
+ .destroy_dquot = dquot_destroy,
+ .get_projid = f2fs_get_projid,
+ .get_next_id = dquot_get_next_id,
+};
+
+static const struct quotactl_ops f2fs_quotactl_ops = {
+ .quota_on = f2fs_quota_on,
+ .quota_off = f2fs_quota_off,
+ .quota_sync = f2fs_quota_sync,
+ .get_state = dquot_get_state,
+ .set_info = dquot_set_dqinfo,
+ .get_dqblk = dquot_get_dqblk,
+ .set_dqblk = dquot_set_dqblk,
+ .get_nextdqblk = dquot_get_next_dqblk,
+};
+#else
+void f2fs_quota_off_umount(struct super_block *sb)
+{
+}
+#endif
+
+static const struct super_operations f2fs_sops = {
.alloc_inode = f2fs_alloc_inode,
.drop_inode = f2fs_drop_inode,
.destroy_inode = f2fs_destroy_inode,
.write_inode = f2fs_write_inode,
.dirty_inode = f2fs_dirty_inode,
.show_options = f2fs_show_options,
+#ifdef CONFIG_QUOTA
+ .quota_read = f2fs_quota_read,
+ .quota_write = f2fs_quota_write,
+ .get_dquots = f2fs_get_dquots,
+#endif
.evict_inode = f2fs_evict_inode,
.put_super = f2fs_put_super,
.sync_fs = f2fs_sync_fs,
@@ -1120,38 +1909,45 @@ static int f2fs_get_context(struct inode
ctx, len, NULL);
}
-static int f2fs_key_prefix(struct inode *inode, u8 **key)
-{
- *key = F2FS_I_SB(inode)->key_prefix;
- return F2FS_I_SB(inode)->key_prefix_size;
-}
-
static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
void *fs_data)
{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+ /*
+ * Encrypting the root directory is not allowed because fsck
+ * expects lost+found directory to exist and remain unencrypted
+ * if LOST_FOUND feature is enabled.
+ *
+ */
+ if (f2fs_sb_has_lost_found(sbi->sb) &&
+ inode->i_ino == F2FS_ROOT_INO(sbi))
+ return -EPERM;
+
return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
ctx, len, fs_data, XATTR_CREATE);
}
+static bool f2fs_dummy_context(struct inode *inode)
+{
+ return DUMMY_ENCRYPTION_ENABLED(F2FS_I_SB(inode));
+}
+
static unsigned f2fs_max_namelen(struct inode *inode)
{
return S_ISLNK(inode->i_mode) ?
inode->i_sb->s_blocksize : F2FS_NAME_LEN;
}
-static struct fscrypt_operations f2fs_cryptops = {
+static const struct fscrypt_operations f2fs_cryptops = {
+ .key_prefix = "f2fs:",
.get_context = f2fs_get_context,
- .key_prefix = f2fs_key_prefix,
.set_context = f2fs_set_context,
- .is_encrypted = f2fs_encrypted_inode,
+ .dummy_context = f2fs_dummy_context,
.empty_dir = f2fs_empty_dir,
.max_namelen = f2fs_max_namelen,
};
-#else
-static struct fscrypt_operations f2fs_cryptops = {
- .is_encrypted = f2fs_encrypted_inode,
-};
#endif
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
@@ -1201,9 +1997,16 @@ static const struct export_operations f2
static loff_t max_file_blocks(void)
{
- loff_t result = (DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS);
+ loff_t result = 0;
loff_t leaf_count = ADDRS_PER_BLOCK;
+ /*
+ * note: previously, result is equal to (DEF_ADDRS_PER_INODE -
+ * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs try to reserve more
+ * space in inode.i_addr, it will be more safe to reassign
+ * result as zero.
+ */
+
/* two direct node blocks */
result += (leaf_count * 2);
@@ -1224,12 +2027,11 @@ static int __f2fs_commit_super(struct bu
lock_buffer(bh);
if (super)
memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
- set_buffer_uptodate(bh);
set_buffer_dirty(bh);
unlock_buffer(bh);
/* it's rare case, we can do fua all the time */
- return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
+ return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
}
static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
@@ -1424,6 +2226,7 @@ int sanity_check_ckpt(struct f2fs_sb_inf
unsigned int total, fsmeta;
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+ unsigned int ovp_segments, reserved_segments;
unsigned int main_segs, blocks_per_seg;
int i;
@@ -1437,6 +2240,16 @@ int sanity_check_ckpt(struct f2fs_sb_inf
if (unlikely(fsmeta >= total))
return 1;
+ ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
+ reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
+
+ if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
+ ovp_segments == 0 || reserved_segments == 0)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Wrong layout: check mkfs.f2fs version");
+ return 1;
+ }
+
main_segs = le32_to_cpu(raw_super->segment_count_main);
blocks_per_seg = sbi->blocks_per_seg;
@@ -1461,7 +2274,7 @@ int sanity_check_ckpt(struct f2fs_sb_inf
static void init_sb_info(struct f2fs_sb_info *sbi)
{
struct f2fs_super_block *raw_super = sbi->raw_super;
- int i;
+ int i, j;
sbi->log_sectors_per_block =
le32_to_cpu(raw_super->log_sectors_per_block);
@@ -1489,17 +2302,19 @@ static void init_sb_info(struct f2fs_sb_
for (i = 0; i < NR_COUNT_TYPE; i++)
atomic_set(&sbi->nr_pages[i], 0);
+ atomic_set(&sbi->wb_sync_req, 0);
+
INIT_LIST_HEAD(&sbi->s_list);
mutex_init(&sbi->umount_mutex);
- mutex_init(&sbi->wio_mutex[NODE]);
- mutex_init(&sbi->wio_mutex[DATA]);
+ for (i = 0; i < NR_PAGE_TYPE - 1; i++)
+ for (j = HOT; j < NR_TEMP_TYPE; j++)
+ mutex_init(&sbi->wio_mutex[i][j]);
spin_lock_init(&sbi->cp_lock);
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- memcpy(sbi->key_prefix, F2FS_KEY_DESC_PREFIX,
- F2FS_KEY_DESC_PREFIX_SIZE);
- sbi->key_prefix_size = F2FS_KEY_DESC_PREFIX_SIZE;
-#endif
+ sbi->dirty_device = 0;
+ spin_lock_init(&sbi->dev_lock);
+
+ init_rwsem(&sbi->sb_lock);
}
static int init_percpu_info(struct f2fs_sb_info *sbi)
@@ -1514,6 +2329,72 @@ static int init_percpu_info(struct f2fs_
GFP_KERNEL);
}
+#ifdef CONFIG_BLK_DEV_ZONED
+static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
+{
+ struct block_device *bdev = FDEV(devi).bdev;
+ sector_t nr_sectors = bdev->bd_part->nr_sects;
+ sector_t sector = 0;
+ struct blk_zone *zones;
+ unsigned int i, nr_zones;
+ unsigned int n = 0;
+ int err = -EIO;
+
+ if (!f2fs_sb_has_blkzoned(sbi->sb))
+ return 0;
+
+ if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
+ SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
+ return -EINVAL;
+ sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
+ if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
+ __ilog2_u32(sbi->blocks_per_blkz))
+ return -EINVAL;
+ sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
+ FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
+ sbi->log_blocks_per_blkz;
+ if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
+ FDEV(devi).nr_blkz++;
+
+ FDEV(devi).blkz_type = f2fs_kmalloc(sbi, FDEV(devi).nr_blkz,
+ GFP_KERNEL);
+ if (!FDEV(devi).blkz_type)
+ return -ENOMEM;
+
+#define F2FS_REPORT_NR_ZONES 4096
+
+ zones = f2fs_kzalloc(sbi, sizeof(struct blk_zone) *
+ F2FS_REPORT_NR_ZONES, GFP_KERNEL);
+ if (!zones)
+ return -ENOMEM;
+
+ /* Get block zones type */
+ while (zones && sector < nr_sectors) {
+
+ nr_zones = F2FS_REPORT_NR_ZONES;
+ err = blkdev_report_zones(bdev, sector,
+ zones, &nr_zones,
+ GFP_KERNEL);
+ if (err)
+ break;
+ if (!nr_zones) {
+ err = -EIO;
+ break;
+ }
+
+ for (i = 0; i < nr_zones; i++) {
+ FDEV(devi).blkz_type[n] = zones[i].type;
+ sector += zones[i].len;
+ n++;
+ }
+ }
+
+ kfree(zones);
+
+ return err;
+}
+#endif
+
/*
* Read f2fs raw super block.
* Because we have two copies of super block, so read both of them
@@ -1587,7 +2468,7 @@ int f2fs_commit_super(struct f2fs_sb_inf
}
/* write back-up superblock first */
- bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0: 1);
+ bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
if (!bh)
return -EIO;
err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
@@ -1598,7 +2479,7 @@ int f2fs_commit_super(struct f2fs_sb_inf
return err;
/* write current valid superblock */
- bh = sb_getblk(sbi->sb, sbi->valid_super_block);
+ bh = sb_bread(sbi->sb, sbi->valid_super_block);
if (!bh)
return -EIO;
err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
@@ -1606,6 +2487,116 @@ int f2fs_commit_super(struct f2fs_sb_inf
return err;
}
+static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
+ unsigned int max_devices = MAX_DEVICES;
+ int i;
+
+ /* Initialize single device information */
+ if (!RDEV(0).path[0]) {
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (!bdev_is_zoned(sbi->sb->s_bdev))
+ return 0;
+ max_devices = 1;
+#else
+ return 0;
+#endif
+ }
+
+ /*
+ * Initialize information for multiple devices, or for a
+ * single zoned block device.
+ */
+ sbi->devs = f2fs_kzalloc(sbi, sizeof(struct f2fs_dev_info) *
+ max_devices, GFP_KERNEL);
+ if (!sbi->devs)
+ return -ENOMEM;
+
+ for (i = 0; i < max_devices; i++) {
+ if (i > 0 && !RDEV(i).path[0])
+ break;
+
+ if (max_devices == 1) {
+ /* Single zoned block device mount */
+ FDEV(0).bdev =
+ blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
+ sbi->sb->s_mode, sbi->sb->s_type);
+ } else {
+ /* Multi-device mount */
+ memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
+ FDEV(i).total_segments =
+ le32_to_cpu(RDEV(i).total_segments);
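+ /* the first device also spans the metadata area before segment 0 */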
+ if (i == 0) {
+ FDEV(i).start_blk = 0;
+ FDEV(i).end_blk = FDEV(i).start_blk +
+ (FDEV(i).total_segments <<
+ sbi->log_blocks_per_seg) - 1 +
+ le32_to_cpu(raw_super->segment0_blkaddr);
+ } else {
+ FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
+ FDEV(i).end_blk = FDEV(i).start_blk +
+ (FDEV(i).total_segments <<
+ sbi->log_blocks_per_seg) - 1;
+ }
+ FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
+ sbi->sb->s_mode, sbi->sb->s_type);
+ }
+ if (IS_ERR(FDEV(i).bdev))
+ return PTR_ERR(FDEV(i).bdev);
+
+ /* to release errored devices */
+ sbi->s_ndevs = i + 1;
+
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
+ !f2fs_sb_has_blkzoned(sbi->sb)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Zoned block device feature not enabled\n");
+ return -EINVAL;
+ }
+ if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
+ if (init_blkz_info(sbi, i)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Failed to initialize F2FS blkzone information");
+ return -EINVAL;
+ }
+ if (max_devices == 1)
+ break;
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
+ i, FDEV(i).path,
+ FDEV(i).total_segments,
+ FDEV(i).start_blk, FDEV(i).end_blk,
+ bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
+ "Host-aware" : "Host-managed");
+ continue;
+ }
+#endif
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "Mount Device [%2d]: %20s, %8u, %8x - %8x",
+ i, FDEV(i).path,
+ FDEV(i).total_segments,
+ FDEV(i).start_blk, FDEV(i).end_blk);
+ }
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
+ return 0;
+}
+
+static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_sm_info *sm_i = SM_I(sbi);
+
+ /* adjust parameters according to the volume size */
+ if (sm_i->main_segments <= SMALL_VOLUME_SEGMENTS) {
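+ /* prefer block reuse, fine-grained discard and forced in-place updates */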
+ F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
+ sm_i->dcc_info->discard_granularity = 1;
+ sm_i->ipu_policy = 1 << F2FS_IPU_FORCE;
+ }
+}
+
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
struct f2fs_sb_info *sbi;
@@ -1653,6 +2644,27 @@ try_onemore:
sb->s_fs_info = sbi;
sbi->raw_super = raw_super;
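+ /* by default only root (resuid=0, resgid=0) may use reserved blocks */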
+ F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
+ F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
+
+ /* precompute checksum seed for metadata */
+ if (f2fs_sb_has_inode_chksum(sb))
+ sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
+ sizeof(raw_super->uuid));
+
+ /*
+ * The BLKZONED feature indicates that the drive was formatted with
+ * zone alignment optimization. This is optional for host-aware
+ * devices, but mandatory for host-managed zoned block devices.
+ */
+#ifndef CONFIG_BLK_DEV_ZONED
+ if (f2fs_sb_has_blkzoned(sb)) {
+ f2fs_msg(sb, KERN_ERR,
+ "Zoned block device support is not enabled\n");
+ err = -EOPNOTSUPP;
+ goto free_sb_buf;
+ }
+#endif
default_options(sbi);
/* parse mount options */
options = kstrdup((const char *)data, GFP_KERNEL);
@@ -1671,33 +2683,69 @@ try_onemore:
sb->s_max_links = F2FS_LINK_MAX;
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
+#ifdef CONFIG_QUOTA
+ sb->dq_op = &f2fs_quota_operations;
+ if (f2fs_sb_has_quota_ino(sb))
+ sb->s_qcop = &dquot_quotactl_sysfile_ops;
+ else
+ sb->s_qcop = &f2fs_quotactl_ops;
+ sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
+
+ if (f2fs_sb_has_quota_ino(sbi->sb)) {
+ for (i = 0; i < MAXQUOTAS; i++) {
+ if (f2fs_qf_ino(sbi->sb, i))
+ sbi->nquota_files++;
+ }
+ }
+#endif
+
sb->s_op = &f2fs_sops;
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
sb->s_cop = &f2fs_cryptops;
+#endif
sb->s_xattr = f2fs_xattr_handlers;
sb->s_export_op = &f2fs_export_ops;
sb->s_magic = F2FS_SUPER_MAGIC;
sb->s_time_gran = 1;
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
- memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
+ memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
+ sb->s_iflags |= SB_I_CGROUPWB;
/* init f2fs-specific super block info */
sbi->valid_super_block = valid_super_block;
mutex_init(&sbi->gc_mutex);
mutex_init(&sbi->cp_mutex);
init_rwsem(&sbi->node_write);
+ init_rwsem(&sbi->node_change);
/* disallow all the data/node/meta page writes */
set_sbi_flag(sbi, SBI_POR_DOING);
spin_lock_init(&sbi->stat_lock);
- init_rwsem(&sbi->read_io.io_rwsem);
- sbi->read_io.sbi = sbi;
- sbi->read_io.bio = NULL;
+ /* init iostat info */
+ spin_lock_init(&sbi->iostat_lock);
+ sbi->iostat_enable = false;
+
for (i = 0; i < NR_PAGE_TYPE; i++) {
- init_rwsem(&sbi->write_io[i].io_rwsem);
- sbi->write_io[i].sbi = sbi;
- sbi->write_io[i].bio = NULL;
+ int n = (i == META) ? 1 : NR_TEMP_TYPE;
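+ /* META is not split by temperature; DATA/NODE get hot/warm/cold queues */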
+ int j;
+
+ sbi->write_io[i] = f2fs_kmalloc(sbi,
+ n * sizeof(struct f2fs_bio_info),
+ GFP_KERNEL);
+ if (!sbi->write_io[i]) {
+ err = -ENOMEM;
+ goto free_options;
+ }
+
+ for (j = HOT; j < n; j++) {
+ init_rwsem(&sbi->write_io[i][j].io_rwsem);
+ sbi->write_io[i][j].sbi = sbi;
+ sbi->write_io[i][j].bio = NULL;
+ spin_lock_init(&sbi->write_io[i][j].io_lock);
+ INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
+ }
}
init_rwsem(&sbi->cp_rwsem);
@@ -1706,14 +2754,23 @@ try_onemore:
err = init_percpu_info(sbi);
if (err)
- goto free_options;
+ goto free_bio_info;
+
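+ /* dummy page pool used to pad bios up to the configured IO size */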
+ if (F2FS_IO_SIZE(sbi) > 1) {
+ sbi->write_io_dummy =
+ mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
+ if (!sbi->write_io_dummy) {
+ err = -ENOMEM;
+ goto free_percpu;
+ }
+ }
/* get an inode for meta space */
sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
if (IS_ERR(sbi->meta_inode)) {
f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
err = PTR_ERR(sbi->meta_inode);
- goto free_options;
+ goto free_io_dummy;
}
err = get_valid_checkpoint(sbi);
@@ -1722,6 +2779,13 @@ try_onemore:
goto free_meta_inode;
}
+ /* Initialize device list */
+ err = f2fs_scan_devices(sbi);
+ if (err) {
+ f2fs_msg(sb, KERN_ERR, "Failed to find devices");
+ goto free_devices;
+ }
+
sbi->total_valid_node_count =
le32_to_cpu(sbi->ckpt->valid_node_count);
percpu_counter_set(&sbi->total_valid_inode_count,
@@ -1730,6 +2794,9 @@ try_onemore:
sbi->total_valid_block_count =
le64_to_cpu(sbi->ckpt->valid_block_count);
sbi->last_valid_block_count = sbi->total_valid_block_count;
+ sbi->reserved_blocks = 0;
+ sbi->current_reserved_blocks = 0;
+ limit_reserve_root(sbi);
for (i = 0; i < NR_INODE_TYPE; i++) {
INIT_LIST_HEAD(&sbi->inode_list[i]);
@@ -1775,10 +2842,7 @@ try_onemore:
goto free_nm;
}
- f2fs_join_shrinker(sbi);
-
- /* if there are nt orphan nodes free them */
- err = recover_orphan_inodes(sbi);
+ err = f2fs_build_stats(sbi);
if (err)
goto free_node_inode;
@@ -1787,7 +2851,7 @@ try_onemore:
if (IS_ERR(root)) {
f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
err = PTR_ERR(root);
- goto free_node_inode;
+ goto free_stats;
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
iput(root);
@@ -1801,26 +2865,28 @@ try_onemore:
goto free_root_inode;
}
- err = f2fs_build_stats(sbi);
+ err = f2fs_register_sysfs(sbi);
if (err)
goto free_root_inode;
- if (f2fs_proc_root)
- sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
-
- if (sbi->s_proc) {
- proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
- &f2fs_seq_segment_info_fops, sb);
- proc_create_data("segment_bits", S_IRUGO, sbi->s_proc,
- &f2fs_seq_segment_bits_fops, sb);
+#ifdef CONFIG_QUOTA
+ /*
+ * Turn on quotas that were not enabled for read-only mounts if the
+ * filesystem has the quota feature, so that they are updated correctly.
+ */
+ if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb)) {
+ err = f2fs_enable_quotas(sb);
+ if (err) {
+ f2fs_msg(sb, KERN_ERR,
+ "Cannot turn on quotas: error %d", err);
+ goto free_sysfs;
+ }
}
-
- sbi->s_kobj.kset = f2fs_kset;
- init_completion(&sbi->s_kobj_unregister);
- err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
- "%s", sb->s_id);
+#endif
+ /* if there are any orphan nodes, free them */
+ err = recover_orphan_inodes(sbi);
if (err)
- goto free_proc;
+ goto free_meta;
/* recover fsynced data */
if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
@@ -1831,7 +2897,7 @@ try_onemore:
if (bdev_read_only(sb->s_bdev) &&
!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
err = -EROFS;
- goto free_kobj;
+ goto free_meta;
}
if (need_fsck)
@@ -1845,7 +2911,7 @@ try_onemore:
need_fsck = true;
f2fs_msg(sb, KERN_ERR,
"Cannot recover all fsync data errno=%d", err);
- goto free_kobj;
+ goto free_meta;
}
} else {
err = recover_fsync_data(sbi, true);
@@ -1854,7 +2920,7 @@ try_onemore:
err = -EINVAL;
f2fs_msg(sb, KERN_ERR,
"Need to recover fsync data");
- goto free_kobj;
+ goto free_meta;
}
}
skip_recovery:
@@ -1869,7 +2935,7 @@ skip_recovery:
/* After POR, we can run background GC thread.*/
err = start_gc_thread(sbi);
if (err)
- goto free_kobj;
+ goto free_meta;
}
kfree(options);
@@ -1881,42 +2947,64 @@ skip_recovery:
sbi->valid_super_block ? 1 : 2, err);
}
+ f2fs_join_shrinker(sbi);
+
+ f2fs_tuning_parameters(sbi);
+
+ f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx",
+ cur_cp_version(F2FS_CKPT(sbi)));
f2fs_update_time(sbi, CP_TIME);
f2fs_update_time(sbi, REQ_TIME);
return 0;
-free_kobj:
+free_meta:
+#ifdef CONFIG_QUOTA
+ if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb))
+ f2fs_quota_off_umount(sbi->sb);
+#endif
f2fs_sync_inode_meta(sbi);
- kobject_del(&sbi->s_kobj);
- kobject_put(&sbi->s_kobj);
- wait_for_completion(&sbi->s_kobj_unregister);
-free_proc:
- if (sbi->s_proc) {
- remove_proc_entry("segment_info", sbi->s_proc);
- remove_proc_entry("segment_bits", sbi->s_proc);
- remove_proc_entry(sb->s_id, f2fs_proc_root);
- }
- f2fs_destroy_stats(sbi);
+ /*
+ * Some dirty meta pages can be left behind when recover_orphan_inodes()
+ * fails with EIO. Then, iput(node_inode) can trigger balance_fs_bg()
+ * followed by write_checkpoint() through f2fs_write_node_pages(), which
+ * falls into an infinite loop in sync_meta_pages().
+ */
+ truncate_inode_pages_final(META_MAPPING(sbi));
+#ifdef CONFIG_QUOTA
+free_sysfs:
+#endif
+ f2fs_unregister_sysfs(sbi);
free_root_inode:
dput(sb->s_root);
sb->s_root = NULL;
+free_stats:
+ f2fs_destroy_stats(sbi);
free_node_inode:
- truncate_inode_pages_final(NODE_MAPPING(sbi));
- mutex_lock(&sbi->umount_mutex);
release_ino_entry(sbi, true);
- f2fs_leave_shrinker(sbi);
+ truncate_inode_pages_final(NODE_MAPPING(sbi));
iput(sbi->node_inode);
- mutex_unlock(&sbi->umount_mutex);
free_nm:
destroy_node_manager(sbi);
free_sm:
destroy_segment_manager(sbi);
+free_devices:
+ destroy_device_list(sbi);
kfree(sbi->ckpt);
free_meta_inode:
make_bad_inode(sbi->meta_inode);
iput(sbi->meta_inode);
-free_options:
+free_io_dummy:
+ mempool_destroy(sbi->write_io_dummy);
+free_percpu:
destroy_percpu_info(sbi);
+free_bio_info:
+ for (i = 0; i < NR_PAGE_TYPE; i++)
+ kfree(sbi->write_io[i]);
+free_options:
+#ifdef CONFIG_QUOTA
+ for (i = 0; i < MAXQUOTAS; i++)
+ kfree(F2FS_OPTION(sbi).s_qf_names[i]);
+#endif
kfree(options);
free_sb_buf:
kfree(raw_super);
@@ -1942,8 +3030,11 @@ static struct dentry *f2fs_mount(struct
static void kill_f2fs_super(struct super_block *sb)
{
- if (sb->s_root)
+ if (sb->s_root) {
set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
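+ /* stop background GC and discard threads before the device goes away */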
+ stop_gc_thread(F2FS_SB(sb));
+ stop_discard_thread(F2FS_SB(sb));
+ }
kill_block_super(sb);
}
@@ -2003,30 +3094,31 @@ static int __init init_f2fs_fs(void)
err = create_extent_cache();
if (err)
goto free_checkpoint_caches;
- f2fs_kset = kset_create_and_add("f2fs", NULL, fs_kobj);
- if (!f2fs_kset) {
- err = -ENOMEM;
+ err = f2fs_init_sysfs();
+ if (err)
goto free_extent_cache;
- }
err = register_shrinker(&f2fs_shrinker_info);
if (err)
- goto free_kset;
-
+ goto free_sysfs;
err = register_filesystem(&f2fs_fs_type);
if (err)
goto free_shrinker;
err = f2fs_create_root_stats();
if (err)
goto free_filesystem;
- f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
+ err = f2fs_init_post_read_processing();
+ if (err)
+ goto free_root_stats;
return 0;
+free_root_stats:
+ f2fs_destroy_root_stats();
free_filesystem:
unregister_filesystem(&f2fs_fs_type);
free_shrinker:
unregister_shrinker(&f2fs_shrinker_info);
-free_kset:
- kset_unregister(f2fs_kset);
+free_sysfs:
+ f2fs_exit_sysfs();
free_extent_cache:
destroy_extent_cache();
free_checkpoint_caches:
@@ -2043,11 +3135,11 @@ fail:
static void __exit exit_f2fs_fs(void)
{
- remove_proc_entry("fs/f2fs", NULL);
+ f2fs_destroy_post_read_processing();
f2fs_destroy_root_stats();
unregister_filesystem(&f2fs_fs_type);
unregister_shrinker(&f2fs_shrinker_info);
- kset_unregister(f2fs_kset);
+ f2fs_exit_sysfs();
destroy_extent_cache();
destroy_checkpoint_caches();
destroy_segment_manager_caches();