Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
 "87 patches.

  Subsystems affected by this patch series: mm (pagecache and hugetlb),
  procfs, misc, MAINTAINERS, lib, checkpatch, binfmt, kallsyms, ramfs,
  init, codafs, nilfs2, hfs, crash_dump, signals, seq_file, fork,
  sysvfs, kcov, gdb, resource, selftests, and ipc"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (87 commits)
  ipc/ipc_sysctl.c: remove fallback for !CONFIG_PROC_SYSCTL
  ipc: check checkpoint_restore_ns_capable() to modify C/R proc files
  selftests/kselftest/runner/run_one(): allow running non-executable files
  virtio-mem: disallow mapping virtio-mem memory via /dev/mem
  kernel/resource: disallow access to exclusive system RAM regions
  kernel/resource: clean up and optimize iomem_is_exclusive()
  scripts/gdb: handle split debug for vmlinux
  kcov: replace local_irq_save() with a local_lock_t
  kcov: avoid enable+disable interrupts if !in_task()
  kcov: allocate per-CPU memory on the relevant node
  Documentation/kcov: define `ip' in the example
  Documentation/kcov: include types.h in the example
  sysv: use BUILD_BUG_ON instead of runtime check
  kernel/fork.c: unshare(): use swap() to make code cleaner
  seq_file: fix passing wrong private data
  seq_file: move seq_escape() to a header
  signal: remove duplicate include in signal.h
  crash_dump: remove duplicate include in crash_dump.h
  crash_dump: fix boolreturn.cocci warning
  hfs/hfsplus: use WARN_ON for sanity check
  ...
commit 59a2ceeef6

.mailmap (2 lines changed)
							| @ -73,6 +73,8 @@ Chris Chiu <chris.chiu@canonical.com> <chiu@endlessm.com> | ||||
| Chris Chiu <chris.chiu@canonical.com> <chiu@endlessos.org> | ||||
| Christophe Ricard <christophe.ricard@gmail.com> | ||||
| Christoph Hellwig <hch@lst.de> | ||||
| Colin Ian King <colin.king@intel.com> <colin.king@canonical.com> | ||||
| Colin Ian King <colin.king@intel.com> <colin.i.king@gmail.com> | ||||
| Corey Minyard <minyard@acm.org> | ||||
| Damian Hobson-Garcia <dhobsong@igel.co.jp> | ||||
| Daniel Borkmann <daniel@iogearbox.net> <danborkmann@googlemail.com> | ||||
|  | ||||
| @ -50,6 +50,7 @@ program using kcov: | ||||
|     #include <sys/mman.h> | ||||
|     #include <unistd.h> | ||||
|     #include <fcntl.h> | ||||
|     #include <linux/types.h> | ||||
| 
 | ||||
|     #define KCOV_INIT_TRACE			_IOR('c', 1, unsigned long) | ||||
|     #define KCOV_ENABLE			_IO('c', 100) | ||||
| @ -177,6 +178,8 @@ Comparison operands collection is similar to coverage collection: | ||||
| 	/* Read number of comparisons collected. */ | ||||
| 	n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED); | ||||
| 	for (i = 0; i < n; i++) { | ||||
| 		uint64_t ip; | ||||
| 
 | ||||
| 		type = cover[i * KCOV_WORDS_PER_CMP + 1]; | ||||
| 		/* arg1 and arg2 - operands of the comparison. */ | ||||
| 		arg1 = cover[i * KCOV_WORDS_PER_CMP + 2]; | ||||
| @ -251,6 +254,8 @@ selectively from different subsystems. | ||||
| 
 | ||||
| .. code-block:: c | ||||
| 
 | ||||
|     /* Same includes and defines as above. */ | ||||
| 
 | ||||
|     struct kcov_remote_arg { | ||||
| 	__u32		trace_mode; | ||||
| 	__u32		area_size; | ||||
|  | ||||
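The two kcov hunks above touch the userspace examples in the kcov documentation (Documentation/dev-tools/kcov.rst): one adds the missing <linux/types.h> include, the other declares `ip' in the comparison loop. For context, a condensed basic-mode sketch of the same interface follows; it is trimmed from the documented example (the fuller versions, including the comparison-tracing variant, live in that document), with error reporting shortened for brevity.

/*
 * Condensed kcov coverage-collection sketch, based on the userspace
 * example in Documentation/dev-tools/kcov.rst.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
#include <linux/types.h>

#define KCOV_INIT_TRACE		_IOR('c', 1, unsigned long)
#define KCOV_ENABLE		_IO('c', 100)
#define KCOV_DISABLE		_IO('c', 101)
#define COVER_SIZE		(64 << 10)
#define KCOV_TRACE_PC		0

int main(void)
{
	unsigned long *cover, n, i;
	int fd = open("/sys/kernel/debug/kcov", O_RDWR);

	if (fd == -1)
		exit(1);
	/* Buffer size is given in unsigned longs, then mapped shared. */
	if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
		exit(1);
	cover = (unsigned long *)mmap(NULL, COVER_SIZE * sizeof(unsigned long),
				      PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if ((void *)cover == MAP_FAILED)
		exit(1);
	/* Trace basic-block PCs for the current thread only. */
	if (ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC))
		exit(1);
	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);

	read(-1, NULL, 0);	/* the syscall being traced */

	/* cover[0] holds the number of PCs recorded since the reset. */
	n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
	for (i = 0; i < n; i++)
		printf("0x%lx\n", cover[i + 1]);

	if (ioctl(fd, KCOV_DISABLE, 0))
		exit(1);
	munmap(cover, COVER_SIZE * sizeof(unsigned long));
	close(fd);
	return 0;
}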
							
								
								
									
MAINTAINERS (21 lines changed)
							| @ -767,7 +767,7 @@ F:	drivers/crypto/allwinner/ | ||||
| ALLWINNER HARDWARE SPINLOCK SUPPORT | ||||
| M:	Wilken Gottwalt <wilken.gottwalt@posteo.net> | ||||
| S:	Maintained | ||||
| F:	Documentation/devicetree/bindings/hwlock/allwinner,sun6i-hwspinlock.yaml | ||||
| F:	Documentation/devicetree/bindings/hwlock/allwinner,sun6i-a31-hwspinlock.yaml | ||||
| F:	drivers/hwspinlock/sun6i_hwspinlock.c | ||||
| 
 | ||||
| ALLWINNER THERMAL DRIVER | ||||
| @ -2783,7 +2783,7 @@ F:	Documentation/devicetree/bindings/arm/toshiba.yaml | ||||
| F:	Documentation/devicetree/bindings/net/toshiba,visconti-dwmac.yaml | ||||
| F:	Documentation/devicetree/bindings/gpio/toshiba,gpio-visconti.yaml | ||||
| F:	Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml | ||||
| F:	Documentation/devicetree/bindings/pinctrl/toshiba,tmpv7700-pinctrl.yaml | ||||
| F:	Documentation/devicetree/bindings/pinctrl/toshiba,visconti-pinctrl.yaml | ||||
| F:	Documentation/devicetree/bindings/watchdog/toshiba,visconti-wdt.yaml | ||||
| F:	arch/arm64/boot/dts/toshiba/ | ||||
| F:	drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c | ||||
| @ -7119,6 +7119,20 @@ F:	include/uapi/linux/mdio.h | ||||
| F:	include/uapi/linux/mii.h | ||||
| F:	net/core/of_net.c | ||||
| 
 | ||||
| EXEC & BINFMT API | ||||
| R:	Eric Biederman <ebiederm@xmission.com> | ||||
| R:	Kees Cook <keescook@chromium.org> | ||||
| F:	arch/alpha/kernel/binfmt_loader.c | ||||
| F:	arch/x86/ia32/ia32_aout.c | ||||
| F:	fs/*binfmt_*.c | ||||
| F:	fs/exec.c | ||||
| F:	include/linux/binfmts.h | ||||
| F:	include/linux/elf.h | ||||
| F:	include/uapi/linux/binfmts.h | ||||
| F:	tools/testing/selftests/exec/ | ||||
| N:	asm/elf.h | ||||
| N:	binfmt | ||||
| 
 | ||||
| EXFAT FILE SYSTEM | ||||
| M:	Namjae Jeon <linkinjeon@kernel.org> | ||||
| M:	Sungjong Seo <sj1557.seo@samsung.com> | ||||
| @ -8562,7 +8576,6 @@ M:	John Stultz <john.stultz@linaro.org> | ||||
| L:	linux-kernel@vger.kernel.org | ||||
| S:	Maintained | ||||
| F:	drivers/misc/hisi_hikey_usb.c | ||||
| F:	Documentation/devicetree/bindings/misc/hisilicon-hikey-usb.yaml | ||||
| 
 | ||||
| HISILICON PMU DRIVER | ||||
| M:	Shaokun Zhang <zhangshaokun@hisilicon.com> | ||||
| @ -9621,7 +9634,7 @@ INTEL KEEM BAY DRM DRIVER | ||||
| M:	Anitha Chrisanthus <anitha.chrisanthus@intel.com> | ||||
| M:	Edmund Dea <edmund.j.dea@intel.com> | ||||
| S:	Maintained | ||||
| F:	Documentation/devicetree/bindings/display/intel,kmb_display.yaml | ||||
| F:	Documentation/devicetree/bindings/display/intel,keembay-display.yaml | ||||
| F:	drivers/gpu/drm/kmb/ | ||||
| 
 | ||||
| INTEL KEEM BAY OCS AES/SM4 CRYPTO DRIVER | ||||
|  | ||||
| @ -129,9 +129,7 @@ dik_show_trace(unsigned long *sp, const char *loglvl) | ||||
| 		extern char _stext[], _etext[]; | ||||
| 		unsigned long tmp = *sp; | ||||
| 		sp++; | ||||
| 		if (tmp < (unsigned long) &_stext) | ||||
| 			continue; | ||||
| 		if (tmp >= (unsigned long) &_etext) | ||||
| 		if (!is_kernel_text(tmp)) | ||||
| 			continue; | ||||
| 		printk("%s[<%lx>] %pSR\n", loglvl, tmp, (void *)tmp); | ||||
| 		if (i > 40) { | ||||
|  | ||||
| @ -34,6 +34,7 @@ | ||||
| #include <linux/mm_types.h> | ||||
| #include <linux/pgtable.h> | ||||
| #include <linux/memblock.h> | ||||
| #include <linux/kallsyms.h> | ||||
| 
 | ||||
| #include <asm/pgalloc.h> | ||||
| #include <linux/io.h> | ||||
| @ -171,7 +172,7 @@ void __init mapin_ram(void) | ||||
| 	for (s = 0; s < lowmem_size; s += PAGE_SIZE) { | ||||
| 		f = _PAGE_PRESENT | _PAGE_ACCESSED | | ||||
| 				_PAGE_SHARED | _PAGE_HWEXEC; | ||||
| 		if ((char *) v < _stext || (char *) v >= _etext) | ||||
| 		if (!is_kernel_text(v)) | ||||
| 			f |= _PAGE_WRENABLE; | ||||
| 		else | ||||
| 			/* On the MicroBlaze, no user access
 | ||||
|  | ||||
| @ -33,8 +33,6 @@ | ||||
| 
 | ||||
| #include <mm/mmu_decl.h> | ||||
| 
 | ||||
| extern char etext[], _stext[], _sinittext[], _einittext[]; | ||||
| 
 | ||||
| static u8 early_fixmap_pagetable[FIXMAP_PTE_SIZE] __page_aligned_data; | ||||
| 
 | ||||
| notrace void __init early_ioremap_init(void) | ||||
| @ -104,14 +102,13 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top) | ||||
| { | ||||
| 	unsigned long v, s; | ||||
| 	phys_addr_t p; | ||||
| 	int ktext; | ||||
| 	bool ktext; | ||||
| 
 | ||||
| 	s = offset; | ||||
| 	v = PAGE_OFFSET + s; | ||||
| 	p = memstart_addr + s; | ||||
| 	for (; s < top; s += PAGE_SIZE) { | ||||
| 		ktext = ((char *)v >= _stext && (char *)v < etext) || | ||||
| 			((char *)v >= _sinittext && (char *)v < _einittext); | ||||
| 		ktext = core_kernel_text(v); | ||||
| 		map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL); | ||||
| 		v += PAGE_SIZE; | ||||
| 		p += PAGE_SIZE; | ||||
|  | ||||
| @ -4,10 +4,14 @@ | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/delay.h> | ||||
| #include <linux/math.h> | ||||
| #include <linux/param.h> | ||||
| #include <linux/timex.h> | ||||
| #include <linux/types.h> | ||||
| #include <linux/export.h> | ||||
| 
 | ||||
| #include <asm/processor.h> | ||||
| 
 | ||||
| /*
 | ||||
|  * This is copies from arch/arm/include/asm/delay.h | ||||
|  * | ||||
|  | ||||
| @ -9,8 +9,12 @@ | ||||
| #define __ASM_FACILITY_H | ||||
| 
 | ||||
| #include <asm/facility-defs.h> | ||||
| 
 | ||||
| #include <linux/minmax.h> | ||||
| #include <linux/string.h> | ||||
| #include <linux/types.h> | ||||
| #include <linux/preempt.h> | ||||
| 
 | ||||
| #include <asm/lowcore.h> | ||||
| 
 | ||||
| #define MAX_FACILITY_BIT (sizeof(stfle_fac_list) * 8) | ||||
|  | ||||
| @ -73,12 +73,23 @@ static int gart_mem_pfn_is_ram(unsigned long pfn) | ||||
| 		      (pfn >= aperture_pfn_start + aperture_page_count)); | ||||
| } | ||||
| 
 | ||||
| #ifdef CONFIG_PROC_VMCORE | ||||
| static bool gart_oldmem_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn) | ||||
| { | ||||
| 	return !!gart_mem_pfn_is_ram(pfn); | ||||
| } | ||||
| 
 | ||||
| static struct vmcore_cb gart_vmcore_cb = { | ||||
| 	.pfn_is_ram = gart_oldmem_pfn_is_ram, | ||||
| }; | ||||
| #endif | ||||
| 
 | ||||
| static void __init exclude_from_core(u64 aper_base, u32 aper_order) | ||||
| { | ||||
| 	aperture_pfn_start = aper_base >> PAGE_SHIFT; | ||||
| 	aperture_page_count = (32 * 1024 * 1024) << aper_order >> PAGE_SHIFT; | ||||
| #ifdef CONFIG_PROC_VMCORE | ||||
| 	WARN_ON(register_oldmem_pfn_is_ram(&gart_mem_pfn_is_ram)); | ||||
| 	register_vmcore_cb(&gart_vmcore_cb); | ||||
| #endif | ||||
| #ifdef CONFIG_PROC_KCORE | ||||
| 	WARN_ON(register_mem_pfn_is_ram(&gart_mem_pfn_is_ram)); | ||||
|  | ||||
| @ -175,7 +175,7 @@ static struct orc_entry *orc_find(unsigned long ip) | ||||
| 	} | ||||
| 
 | ||||
| 	/* vmlinux .init slow lookup: */ | ||||
| 	if (init_kernel_text(ip)) | ||||
| 	if (is_kernel_inittext(ip)) | ||||
| 		return __orc_find(__start_orc_unwind_ip, __start_orc_unwind, | ||||
| 				  __stop_orc_unwind_ip - __start_orc_unwind_ip, ip); | ||||
| 
 | ||||
|  | ||||
| @ -238,11 +238,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * The <linux/kallsyms.h> already defines is_kernel_text, | ||||
|  * using '__' prefix not to get in conflict. | ||||
|  */ | ||||
| static inline int __is_kernel_text(unsigned long addr) | ||||
| static inline int is_x86_32_kernel_text(unsigned long addr) | ||||
| { | ||||
| 	if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end) | ||||
| 		return 1; | ||||
| @ -333,8 +329,8 @@ repeat: | ||||
| 				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE + | ||||
| 					PAGE_OFFSET + PAGE_SIZE-1; | ||||
| 
 | ||||
| 				if (__is_kernel_text(addr) || | ||||
| 				    __is_kernel_text(addr2)) | ||||
| 				if (is_x86_32_kernel_text(addr) || | ||||
| 				    is_x86_32_kernel_text(addr2)) | ||||
| 					prot = PAGE_KERNEL_LARGE_EXEC; | ||||
| 
 | ||||
| 				pages_2m++; | ||||
| @ -359,7 +355,7 @@ repeat: | ||||
| 				 */ | ||||
| 				pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR); | ||||
| 
 | ||||
| 				if (__is_kernel_text(addr)) | ||||
| 				if (is_x86_32_kernel_text(addr)) | ||||
| 					prot = PAGE_KERNEL_EXEC; | ||||
| 
 | ||||
| 				pages_4k++; | ||||
| @ -789,7 +785,7 @@ static void mark_nxdata_nx(void) | ||||
| 	 */ | ||||
| 	unsigned long start = PFN_ALIGN(_etext); | ||||
| 	/*
 | ||||
| 	 * This comes from __is_kernel_text upper limit. Also HPAGE where used: | ||||
| 	 * This comes from is_x86_32_kernel_text upper limit. Also HPAGE where used: | ||||
| 	 */ | ||||
| 	unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start; | ||||
| 
 | ||||
|  | ||||
| @ -9,39 +9,28 @@ | ||||
| 
 | ||||
| #ifdef CONFIG_PROC_VMCORE | ||||
| /*
 | ||||
|  * This function is used in two contexts: | ||||
|  * - the kdump kernel has to check whether a pfn of the crashed kernel | ||||
|  *   was a ballooned page. vmcore is using this function to decide | ||||
|  *   whether to access a pfn of the crashed kernel. | ||||
|  * - the kexec kernel has to check whether a pfn was ballooned by the | ||||
|  *   previous kernel. If the pfn is ballooned, handle it properly. | ||||
|  * Returns 0 if the pfn is not backed by a RAM page, the caller may | ||||
|  * The kdump kernel has to check whether a pfn of the crashed kernel | ||||
|  * was a ballooned page. vmcore is using this function to decide | ||||
|  * whether to access a pfn of the crashed kernel. | ||||
|  * Returns "false" if the pfn is not backed by a RAM page, the caller may | ||||
|  * handle the pfn special in this case. | ||||
|  */ | ||||
| static int xen_oldmem_pfn_is_ram(unsigned long pfn) | ||||
| static bool xen_vmcore_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn) | ||||
| { | ||||
| 	struct xen_hvm_get_mem_type a = { | ||||
| 		.domid = DOMID_SELF, | ||||
| 		.pfn = pfn, | ||||
| 	}; | ||||
| 	int ram; | ||||
| 
 | ||||
| 	if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a)) | ||||
| 		return -ENXIO; | ||||
| 
 | ||||
| 	switch (a.mem_type) { | ||||
| 	case HVMMEM_mmio_dm: | ||||
| 		ram = 0; | ||||
| 		break; | ||||
| 	case HVMMEM_ram_rw: | ||||
| 	case HVMMEM_ram_ro: | ||||
| 	default: | ||||
| 		ram = 1; | ||||
| 		break; | ||||
| 	if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a)) { | ||||
| 		pr_warn_once("Unexpected HVMOP_get_mem_type failure\n"); | ||||
| 		return true; | ||||
| 	} | ||||
| 
 | ||||
| 	return ram; | ||||
| 	return a.mem_type != HVMMEM_mmio_dm; | ||||
| } | ||||
| static struct vmcore_cb xen_vmcore_cb = { | ||||
| 	.pfn_is_ram = xen_vmcore_pfn_is_ram, | ||||
| }; | ||||
| #endif | ||||
| 
 | ||||
| static void xen_hvm_exit_mmap(struct mm_struct *mm) | ||||
| @ -75,6 +64,6 @@ void __init xen_hvm_init_mmu_ops(void) | ||||
| 	if (is_pagetable_dying_supported()) | ||||
| 		pv_ops.mmu.exit_mmap = xen_hvm_exit_mmap; | ||||
| #ifdef CONFIG_PROC_VMCORE | ||||
| 	WARN_ON(register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram)); | ||||
| 	register_vmcore_cb(&xen_vmcore_cb); | ||||
| #endif | ||||
| } | ||||
|  | ||||
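Both conversions above (AMD GART and Xen) move from register_oldmem_pfn_is_ram() to the new vmcore_cb interface. The following is a minimal sketch of that pattern for a hypothetical driver, assuming CONFIG_PROC_VMCORE=y; "my_driver" and my_pfn_is_backed() are made-up placeholders, while the vmcore_cb structure, the pfn_is_ram hook, and register_vmcore_cb()/unregister_vmcore_cb() are the interface the hunks switch to.

/*
 * Hedged sketch of the vmcore_cb registration pattern adopted above.
 */
#include <linux/crash_dump.h>
#include <linux/module.h>

static bool my_pfn_is_backed(unsigned long pfn)
{
	/* Device-specific knowledge about the old kernel's memory layout. */
	return true;
}

static bool my_vmcore_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
{
	/* Returning false tells /proc/vmcore not to read this pfn. */
	return my_pfn_is_backed(pfn);
}

static struct vmcore_cb my_vmcore_cb = {
	.pfn_is_ram = my_vmcore_pfn_is_ram,
};

static int __init my_driver_init(void)
{
	/* Only meaningful in a kdump kernel; registration itself is cheap. */
	register_vmcore_cb(&my_vmcore_cb);
	return 0;
}

static void __exit my_driver_exit(void)
{
	unregister_vmcore_cb(&my_vmcore_cb);
}

module_init(my_driver_init);
module_exit(my_driver_exit);
MODULE_LICENSE("GPL");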
| @ -1668,13 +1668,10 @@ __dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history, | ||||
| 	for (i = 0; i < history->len; i++) { | ||||
| 		const struct drm_dp_mst_topology_ref_entry *entry = | ||||
| 			&history->entries[i]; | ||||
| 		ulong *entries; | ||||
| 		uint nr_entries; | ||||
| 		u64 ts_nsec = entry->ts_nsec; | ||||
| 		u32 rem_nsec = do_div(ts_nsec, 1000000000); | ||||
| 
 | ||||
| 		nr_entries = stack_depot_fetch(entry->backtrace, &entries); | ||||
| 		stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4); | ||||
| 		stack_depot_snprint(entry->backtrace, buf, PAGE_SIZE, 4); | ||||
| 
 | ||||
| 		drm_printf(&p, "  %d %ss (last at %5llu.%06u):\n%s", | ||||
| 			   entry->count, | ||||
|  | ||||
| @ -118,8 +118,6 @@ static noinline void save_stack(struct drm_mm_node *node) | ||||
| static void show_leaks(struct drm_mm *mm) | ||||
| { | ||||
| 	struct drm_mm_node *node; | ||||
| 	unsigned long *entries; | ||||
| 	unsigned int nr_entries; | ||||
| 	char *buf; | ||||
| 
 | ||||
| 	buf = kmalloc(BUFSZ, GFP_KERNEL); | ||||
| @ -133,8 +131,7 @@ static void show_leaks(struct drm_mm *mm) | ||||
| 			continue; | ||||
| 		} | ||||
| 
 | ||||
| 		nr_entries = stack_depot_fetch(node->stack, &entries); | ||||
| 		stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0); | ||||
| 		stack_depot_snprint(node->stack, buf, BUFSZ, 0); | ||||
| 		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s", | ||||
| 			  node->start, node->size, buf); | ||||
| 	} | ||||
|  | ||||
| @ -56,8 +56,6 @@ void i915_vma_free(struct i915_vma *vma) | ||||
| 
 | ||||
| static void vma_print_allocator(struct i915_vma *vma, const char *reason) | ||||
| { | ||||
| 	unsigned long *entries; | ||||
| 	unsigned int nr_entries; | ||||
| 	char buf[512]; | ||||
| 
 | ||||
| 	if (!vma->node.stack) { | ||||
| @ -66,8 +64,7 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason) | ||||
| 		return; | ||||
| 	} | ||||
| 
 | ||||
| 	nr_entries = stack_depot_fetch(vma->node.stack, &entries); | ||||
| 	stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0); | ||||
| 	stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0); | ||||
| 	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n", | ||||
| 			 vma->node.start, vma->node.size, reason, buf); | ||||
| } | ||||
|  | ||||
| @ -65,16 +65,6 @@ static noinline depot_stack_handle_t __save_depot_stack(void) | ||||
| 	return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN); | ||||
| } | ||||
| 
 | ||||
| static void __print_depot_stack(depot_stack_handle_t stack, | ||||
| 				char *buf, int sz, int indent) | ||||
| { | ||||
| 	unsigned long *entries; | ||||
| 	unsigned int nr_entries; | ||||
| 
 | ||||
| 	nr_entries = stack_depot_fetch(stack, &entries); | ||||
| 	stack_trace_snprint(buf, sz, entries, nr_entries, indent); | ||||
| } | ||||
| 
 | ||||
| static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm) | ||||
| { | ||||
| 	spin_lock_init(&rpm->debug.lock); | ||||
| @ -146,12 +136,12 @@ static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm, | ||||
| 		if (!buf) | ||||
| 			return; | ||||
| 
 | ||||
| 		__print_depot_stack(stack, buf, PAGE_SIZE, 2); | ||||
| 		stack_depot_snprint(stack, buf, PAGE_SIZE, 2); | ||||
| 		DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf); | ||||
| 
 | ||||
| 		stack = READ_ONCE(rpm->debug.last_release); | ||||
| 		if (stack) { | ||||
| 			__print_depot_stack(stack, buf, PAGE_SIZE, 2); | ||||
| 			stack_depot_snprint(stack, buf, PAGE_SIZE, 2); | ||||
| 			DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf); | ||||
| 		} | ||||
| 
 | ||||
| @ -183,12 +173,12 @@ __print_intel_runtime_pm_wakeref(struct drm_printer *p, | ||||
| 		return; | ||||
| 
 | ||||
| 	if (dbg->last_acquire) { | ||||
| 		__print_depot_stack(dbg->last_acquire, buf, PAGE_SIZE, 2); | ||||
| 		stack_depot_snprint(dbg->last_acquire, buf, PAGE_SIZE, 2); | ||||
| 		drm_printf(p, "Wakeref last acquired:\n%s", buf); | ||||
| 	} | ||||
| 
 | ||||
| 	if (dbg->last_release) { | ||||
| 		__print_depot_stack(dbg->last_release, buf, PAGE_SIZE, 2); | ||||
| 		stack_depot_snprint(dbg->last_release, buf, PAGE_SIZE, 2); | ||||
| 		drm_printf(p, "Wakeref last released:\n%s", buf); | ||||
| 	} | ||||
| 
 | ||||
| @ -203,7 +193,7 @@ __print_intel_runtime_pm_wakeref(struct drm_printer *p, | ||||
| 		rep = 1; | ||||
| 		while (i + 1 < dbg->count && dbg->owners[i + 1] == stack) | ||||
| 			rep++, i++; | ||||
| 		__print_depot_stack(stack, buf, PAGE_SIZE, 2); | ||||
| 		stack_depot_snprint(stack, buf, PAGE_SIZE, 2); | ||||
| 		drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf); | ||||
| 	} | ||||
| 
 | ||||
|  | ||||
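The drm and i915 hunks above replace open-coded stack_depot_fetch() + stack_trace_snprint() pairs with the new stack_depot_snprint() helper. Below is a rough sketch of the save-and-print round trip, assuming CONFIG_STACKDEPOT; the entry count, buffer size, GFP flags, and skip count are illustrative choices, not taken from any of the drivers above.

/*
 * Sketch of the stack depot round trip consolidated above: save a trace
 * once, keep only the depot handle, and render it later with
 * stack_depot_snprint() instead of fetching entries by hand.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t capture_stack(void)
{
	unsigned long entries[16];
	unsigned int n;

	/* Skip one frame so the caller of capture_stack() is on top. */
	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
}

static void print_stack(depot_stack_handle_t handle)
{
	char *buf;

	if (!handle)
		return;
	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;
	/* Renders the stored trace into buf, indenting each line by 2. */
	stack_depot_snprint(handle, buf, PAGE_SIZE, 2);
	pr_info("saved stack:\n%s", buf);
	kfree(buf);
}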
| @ -12,6 +12,7 @@ | ||||
| #include <linux/types.h> | ||||
| #include <linux/errno.h> | ||||
| #include <linux/delay.h> | ||||
| #include <linux/bits.h> | ||||
| #include <linux/string.h> | ||||
| 
 | ||||
| int cxd2880_convert2s_complement(u32 value, u32 bitlen); | ||||
|  | ||||
| @ -111,6 +111,7 @@ config VIRTIO_MEM | ||||
| 	depends on MEMORY_HOTPLUG | ||||
| 	depends on MEMORY_HOTREMOVE | ||||
| 	depends on CONTIG_ALLOC | ||||
| 	depends on EXCLUSIVE_SYSTEM_RAM | ||||
| 	help | ||||
| 	 This driver provides access to virtio-mem paravirtualized memory | ||||
| 	 devices, allowing to hotplug and hotunplug memory. | ||||
|  | ||||
| @ -223,6 +223,9 @@ struct virtio_mem { | ||||
| 	 * When this lock is held the pointers can't change, ONLINE and | ||||
| 	 * OFFLINE blocks can't change the state and no subblocks will get | ||||
| 	 * plugged/unplugged. | ||||
| 	 * | ||||
| 	 * In kdump mode, used to serialize requests, last_block_addr and | ||||
| 	 * last_block_plugged. | ||||
| 	 */ | ||||
| 	struct mutex hotplug_mutex; | ||||
| 	bool hotplug_active; | ||||
| @ -230,6 +233,9 @@ struct virtio_mem { | ||||
| 	/* An error occurred we cannot handle - stop processing requests. */ | ||||
| 	bool broken; | ||||
| 
 | ||||
| 	/* Cached valued of is_kdump_kernel() when the device was probed. */ | ||||
| 	bool in_kdump; | ||||
| 
 | ||||
| 	/* The driver is being removed. */ | ||||
| 	spinlock_t removal_lock; | ||||
| 	bool removing; | ||||
| @ -243,6 +249,13 @@ struct virtio_mem { | ||||
| 	/* Memory notifier (online/offline events). */ | ||||
| 	struct notifier_block memory_notifier; | ||||
| 
 | ||||
| #ifdef CONFIG_PROC_VMCORE | ||||
| 	/* vmcore callback for /proc/vmcore handling in kdump mode */ | ||||
| 	struct vmcore_cb vmcore_cb; | ||||
| 	uint64_t last_block_addr; | ||||
| 	bool last_block_plugged; | ||||
| #endif /* CONFIG_PROC_VMCORE */ | ||||
| 
 | ||||
| 	/* Next device in the list of virtio-mem devices. */ | ||||
| 	struct list_head next; | ||||
| }; | ||||
| @ -260,6 +273,8 @@ static void virtio_mem_fake_offline_going_offline(unsigned long pfn, | ||||
| static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn, | ||||
| 						   unsigned long nr_pages); | ||||
| static void virtio_mem_retry(struct virtio_mem *vm); | ||||
| static int virtio_mem_create_resource(struct virtio_mem *vm); | ||||
| static void virtio_mem_delete_resource(struct virtio_mem *vm); | ||||
| 
 | ||||
| /*
 | ||||
|  * Register a virtio-mem device so it will be considered for the online_page | ||||
| @ -2291,6 +2306,12 @@ static void virtio_mem_run_wq(struct work_struct *work) | ||||
| 	uint64_t diff; | ||||
| 	int rc; | ||||
| 
 | ||||
| 	if (unlikely(vm->in_kdump)) { | ||||
| 		dev_warn_once(&vm->vdev->dev, | ||||
| 			     "unexpected workqueue run in kdump kernel\n"); | ||||
| 		return; | ||||
| 	} | ||||
| 
 | ||||
| 	hrtimer_cancel(&vm->retry_timer); | ||||
| 
 | ||||
| 	if (vm->broken) | ||||
| @ -2392,41 +2413,11 @@ static int virtio_mem_init_vq(struct virtio_mem *vm) | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| static int virtio_mem_init(struct virtio_mem *vm) | ||||
| static int virtio_mem_init_hotplug(struct virtio_mem *vm) | ||||
| { | ||||
| 	const struct range pluggable_range = mhp_get_pluggable_range(true); | ||||
| 	uint64_t sb_size, addr; | ||||
| 	uint16_t node_id; | ||||
| 
 | ||||
| 	if (!vm->vdev->config->get) { | ||||
| 		dev_err(&vm->vdev->dev, "config access disabled\n"); | ||||
| 		return -EINVAL; | ||||
| 	} | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * We don't want to (un)plug or reuse any memory when in kdump. The | ||||
| 	 * memory is still accessible (but not mapped). | ||||
| 	 */ | ||||
| 	if (is_kdump_kernel()) { | ||||
| 		dev_warn(&vm->vdev->dev, "disabled in kdump kernel\n"); | ||||
| 		return -EBUSY; | ||||
| 	} | ||||
| 
 | ||||
| 	/* Fetch all properties that can't change. */ | ||||
| 	virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size, | ||||
| 			&vm->plugged_size); | ||||
| 	virtio_cread_le(vm->vdev, struct virtio_mem_config, block_size, | ||||
| 			&vm->device_block_size); | ||||
| 	virtio_cread_le(vm->vdev, struct virtio_mem_config, node_id, | ||||
| 			&node_id); | ||||
| 	vm->nid = virtio_mem_translate_node_id(vm, node_id); | ||||
| 	virtio_cread_le(vm->vdev, struct virtio_mem_config, addr, &vm->addr); | ||||
| 	virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size, | ||||
| 			&vm->region_size); | ||||
| 
 | ||||
| 	/* Determine the nid for the device based on the lowest address. */ | ||||
| 	if (vm->nid == NUMA_NO_NODE) | ||||
| 		vm->nid = memory_add_physaddr_to_nid(vm->addr); | ||||
| 	uint64_t unit_pages, sb_size, addr; | ||||
| 	int rc; | ||||
| 
 | ||||
| 	/* bad device setup - warn only */ | ||||
| 	if (!IS_ALIGNED(vm->addr, memory_block_size_bytes())) | ||||
| @ -2496,10 +2487,6 @@ static int virtio_mem_init(struct virtio_mem *vm) | ||||
| 					      vm->offline_threshold); | ||||
| 	} | ||||
| 
 | ||||
| 	dev_info(&vm->vdev->dev, "start address: 0x%llx", vm->addr); | ||||
| 	dev_info(&vm->vdev->dev, "region size: 0x%llx", vm->region_size); | ||||
| 	dev_info(&vm->vdev->dev, "device block size: 0x%llx", | ||||
| 		 (unsigned long long)vm->device_block_size); | ||||
| 	dev_info(&vm->vdev->dev, "memory block size: 0x%lx", | ||||
| 		 memory_block_size_bytes()); | ||||
| 	if (vm->in_sbm) | ||||
| @ -2508,10 +2495,170 @@ static int virtio_mem_init(struct virtio_mem *vm) | ||||
| 	else | ||||
| 		dev_info(&vm->vdev->dev, "big block size: 0x%llx", | ||||
| 			 (unsigned long long)vm->bbm.bb_size); | ||||
| 
 | ||||
| 	/* create the parent resource for all memory */ | ||||
| 	rc = virtio_mem_create_resource(vm); | ||||
| 	if (rc) | ||||
| 		return rc; | ||||
| 
 | ||||
| 	/* use a single dynamic memory group to cover the whole memory device */ | ||||
| 	if (vm->in_sbm) | ||||
| 		unit_pages = PHYS_PFN(memory_block_size_bytes()); | ||||
| 	else | ||||
| 		unit_pages = PHYS_PFN(vm->bbm.bb_size); | ||||
| 	rc = memory_group_register_dynamic(vm->nid, unit_pages); | ||||
| 	if (rc < 0) | ||||
| 		goto out_del_resource; | ||||
| 	vm->mgid = rc; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * If we still have memory plugged, we have to unplug all memory first. | ||||
| 	 * Registering our parent resource makes sure that this memory isn't | ||||
| 	 * actually in use (e.g., trying to reload the driver). | ||||
| 	 */ | ||||
| 	if (vm->plugged_size) { | ||||
| 		vm->unplug_all_required = true; | ||||
| 		dev_info(&vm->vdev->dev, "unplugging all memory is required\n"); | ||||
| 	} | ||||
| 
 | ||||
| 	/* register callbacks */ | ||||
| 	vm->memory_notifier.notifier_call = virtio_mem_memory_notifier_cb; | ||||
| 	rc = register_memory_notifier(&vm->memory_notifier); | ||||
| 	if (rc) | ||||
| 		goto out_unreg_group; | ||||
| 	rc = register_virtio_mem_device(vm); | ||||
| 	if (rc) | ||||
| 		goto out_unreg_mem; | ||||
| 
 | ||||
| 	return 0; | ||||
| out_unreg_mem: | ||||
| 	unregister_memory_notifier(&vm->memory_notifier); | ||||
| out_unreg_group: | ||||
| 	memory_group_unregister(vm->mgid); | ||||
| out_del_resource: | ||||
| 	virtio_mem_delete_resource(vm); | ||||
| 	return rc; | ||||
| } | ||||
| 
 | ||||
| #ifdef CONFIG_PROC_VMCORE | ||||
| static int virtio_mem_send_state_request(struct virtio_mem *vm, uint64_t addr, | ||||
| 					 uint64_t size) | ||||
| { | ||||
| 	const uint64_t nb_vm_blocks = size / vm->device_block_size; | ||||
| 	const struct virtio_mem_req req = { | ||||
| 		.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_STATE), | ||||
| 		.u.state.addr = cpu_to_virtio64(vm->vdev, addr), | ||||
| 		.u.state.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks), | ||||
| 	}; | ||||
| 	int rc = -ENOMEM; | ||||
| 
 | ||||
| 	dev_dbg(&vm->vdev->dev, "requesting state: 0x%llx - 0x%llx\n", addr, | ||||
| 		addr + size - 1); | ||||
| 
 | ||||
| 	switch (virtio_mem_send_request(vm, &req)) { | ||||
| 	case VIRTIO_MEM_RESP_ACK: | ||||
| 		return virtio16_to_cpu(vm->vdev, vm->resp.u.state.state); | ||||
| 	case VIRTIO_MEM_RESP_ERROR: | ||||
| 		rc = -EINVAL; | ||||
| 		break; | ||||
| 	default: | ||||
| 		break; | ||||
| 	} | ||||
| 
 | ||||
| 	dev_dbg(&vm->vdev->dev, "requesting state failed: %d\n", rc); | ||||
| 	return rc; | ||||
| } | ||||
| 
 | ||||
| static bool virtio_mem_vmcore_pfn_is_ram(struct vmcore_cb *cb, | ||||
| 					 unsigned long pfn) | ||||
| { | ||||
| 	struct virtio_mem *vm = container_of(cb, struct virtio_mem, | ||||
| 					     vmcore_cb); | ||||
| 	uint64_t addr = PFN_PHYS(pfn); | ||||
| 	bool is_ram; | ||||
| 	int rc; | ||||
| 
 | ||||
| 	if (!virtio_mem_contains_range(vm, addr, PAGE_SIZE)) | ||||
| 		return true; | ||||
| 	if (!vm->plugged_size) | ||||
| 		return false; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * We have to serialize device requests and access to the information | ||||
| 	 * about the block queried last. | ||||
| 	 */ | ||||
| 	mutex_lock(&vm->hotplug_mutex); | ||||
| 
 | ||||
| 	addr = ALIGN_DOWN(addr, vm->device_block_size); | ||||
| 	if (addr != vm->last_block_addr) { | ||||
| 		rc = virtio_mem_send_state_request(vm, addr, | ||||
| 						   vm->device_block_size); | ||||
| 		/* On any kind of error, we're going to signal !ram. */ | ||||
| 		if (rc == VIRTIO_MEM_STATE_PLUGGED) | ||||
| 			vm->last_block_plugged = true; | ||||
| 		else | ||||
| 			vm->last_block_plugged = false; | ||||
| 		vm->last_block_addr = addr; | ||||
| 	} | ||||
| 
 | ||||
| 	is_ram = vm->last_block_plugged; | ||||
| 	mutex_unlock(&vm->hotplug_mutex); | ||||
| 	return is_ram; | ||||
| } | ||||
| #endif /* CONFIG_PROC_VMCORE */ | ||||
| 
 | ||||
| static int virtio_mem_init_kdump(struct virtio_mem *vm) | ||||
| { | ||||
| #ifdef CONFIG_PROC_VMCORE | ||||
| 	dev_info(&vm->vdev->dev, "memory hot(un)plug disabled in kdump kernel\n"); | ||||
| 	vm->vmcore_cb.pfn_is_ram = virtio_mem_vmcore_pfn_is_ram; | ||||
| 	register_vmcore_cb(&vm->vmcore_cb); | ||||
| 	return 0; | ||||
| #else /* CONFIG_PROC_VMCORE */ | ||||
| 	dev_warn(&vm->vdev->dev, "disabled in kdump kernel without vmcore\n"); | ||||
| 	return -EBUSY; | ||||
| #endif /* CONFIG_PROC_VMCORE */ | ||||
| } | ||||
| 
 | ||||
| static int virtio_mem_init(struct virtio_mem *vm) | ||||
| { | ||||
| 	uint16_t node_id; | ||||
| 
 | ||||
| 	if (!vm->vdev->config->get) { | ||||
| 		dev_err(&vm->vdev->dev, "config access disabled\n"); | ||||
| 		return -EINVAL; | ||||
| 	} | ||||
| 
 | ||||
| 	/* Fetch all properties that can't change. */ | ||||
| 	virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size, | ||||
| 			&vm->plugged_size); | ||||
| 	virtio_cread_le(vm->vdev, struct virtio_mem_config, block_size, | ||||
| 			&vm->device_block_size); | ||||
| 	virtio_cread_le(vm->vdev, struct virtio_mem_config, node_id, | ||||
| 			&node_id); | ||||
| 	vm->nid = virtio_mem_translate_node_id(vm, node_id); | ||||
| 	virtio_cread_le(vm->vdev, struct virtio_mem_config, addr, &vm->addr); | ||||
| 	virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size, | ||||
| 			&vm->region_size); | ||||
| 
 | ||||
| 	/* Determine the nid for the device based on the lowest address. */ | ||||
| 	if (vm->nid == NUMA_NO_NODE) | ||||
| 		vm->nid = memory_add_physaddr_to_nid(vm->addr); | ||||
| 
 | ||||
| 	dev_info(&vm->vdev->dev, "start address: 0x%llx", vm->addr); | ||||
| 	dev_info(&vm->vdev->dev, "region size: 0x%llx", vm->region_size); | ||||
| 	dev_info(&vm->vdev->dev, "device block size: 0x%llx", | ||||
| 		 (unsigned long long)vm->device_block_size); | ||||
| 	if (vm->nid != NUMA_NO_NODE && IS_ENABLED(CONFIG_NUMA)) | ||||
| 		dev_info(&vm->vdev->dev, "nid: %d", vm->nid); | ||||
| 
 | ||||
| 	return 0; | ||||
| 	/*
 | ||||
| 	 * We don't want to (un)plug or reuse any memory when in kdump. The | ||||
| 	 * memory is still accessible (but not exposed to Linux). | ||||
| 	 */ | ||||
| 	if (vm->in_kdump) | ||||
| 		return virtio_mem_init_kdump(vm); | ||||
| 	return virtio_mem_init_hotplug(vm); | ||||
| } | ||||
| 
 | ||||
| static int virtio_mem_create_resource(struct virtio_mem *vm) | ||||
| @ -2525,8 +2672,10 @@ static int virtio_mem_create_resource(struct virtio_mem *vm) | ||||
| 	if (!name) | ||||
| 		return -ENOMEM; | ||||
| 
 | ||||
| 	/* Disallow mapping device memory via /dev/mem completely. */ | ||||
| 	vm->parent_resource = __request_mem_region(vm->addr, vm->region_size, | ||||
| 						   name, IORESOURCE_SYSTEM_RAM); | ||||
| 						   name, IORESOURCE_SYSTEM_RAM | | ||||
| 						   IORESOURCE_EXCLUSIVE); | ||||
| 	if (!vm->parent_resource) { | ||||
| 		kfree(name); | ||||
| 		dev_warn(&vm->vdev->dev, "could not reserve device region\n"); | ||||
| @ -2571,7 +2720,6 @@ static bool virtio_mem_has_memory_added(struct virtio_mem *vm) | ||||
| static int virtio_mem_probe(struct virtio_device *vdev) | ||||
| { | ||||
| 	struct virtio_mem *vm; | ||||
| 	uint64_t unit_pages; | ||||
| 	int rc; | ||||
| 
 | ||||
| 	BUILD_BUG_ON(sizeof(struct virtio_mem_req) != 24); | ||||
| @ -2590,6 +2738,7 @@ static int virtio_mem_probe(struct virtio_device *vdev) | ||||
| 	hrtimer_init(&vm->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||||
| 	vm->retry_timer.function = virtio_mem_timer_expired; | ||||
| 	vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS; | ||||
| 	vm->in_kdump = is_kdump_kernel(); | ||||
| 
 | ||||
| 	/* register the virtqueue */ | ||||
| 	rc = virtio_mem_init_vq(vm); | ||||
| @ -2601,53 +2750,15 @@ static int virtio_mem_probe(struct virtio_device *vdev) | ||||
| 	if (rc) | ||||
| 		goto out_del_vq; | ||||
| 
 | ||||
| 	/* create the parent resource for all memory */ | ||||
| 	rc = virtio_mem_create_resource(vm); | ||||
| 	if (rc) | ||||
| 		goto out_del_vq; | ||||
| 
 | ||||
| 	/* use a single dynamic memory group to cover the whole memory device */ | ||||
| 	if (vm->in_sbm) | ||||
| 		unit_pages = PHYS_PFN(memory_block_size_bytes()); | ||||
| 	else | ||||
| 		unit_pages = PHYS_PFN(vm->bbm.bb_size); | ||||
| 	rc = memory_group_register_dynamic(vm->nid, unit_pages); | ||||
| 	if (rc < 0) | ||||
| 		goto out_del_resource; | ||||
| 	vm->mgid = rc; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * If we still have memory plugged, we have to unplug all memory first. | ||||
| 	 * Registering our parent resource makes sure that this memory isn't | ||||
| 	 * actually in use (e.g., trying to reload the driver). | ||||
| 	 */ | ||||
| 	if (vm->plugged_size) { | ||||
| 		vm->unplug_all_required = true; | ||||
| 		dev_info(&vm->vdev->dev, "unplugging all memory is required\n"); | ||||
| 	} | ||||
| 
 | ||||
| 	/* register callbacks */ | ||||
| 	vm->memory_notifier.notifier_call = virtio_mem_memory_notifier_cb; | ||||
| 	rc = register_memory_notifier(&vm->memory_notifier); | ||||
| 	if (rc) | ||||
| 		goto out_unreg_group; | ||||
| 	rc = register_virtio_mem_device(vm); | ||||
| 	if (rc) | ||||
| 		goto out_unreg_mem; | ||||
| 
 | ||||
| 	virtio_device_ready(vdev); | ||||
| 
 | ||||
| 	/* trigger a config update to start processing the requested_size */ | ||||
| 	atomic_set(&vm->config_changed, 1); | ||||
| 	queue_work(system_freezable_wq, &vm->wq); | ||||
| 	if (!vm->in_kdump) { | ||||
| 		atomic_set(&vm->config_changed, 1); | ||||
| 		queue_work(system_freezable_wq, &vm->wq); | ||||
| 	} | ||||
| 
 | ||||
| 	return 0; | ||||
| out_unreg_mem: | ||||
| 	unregister_memory_notifier(&vm->memory_notifier); | ||||
| out_unreg_group: | ||||
| 	memory_group_unregister(vm->mgid); | ||||
| out_del_resource: | ||||
| 	virtio_mem_delete_resource(vm); | ||||
| out_del_vq: | ||||
| 	vdev->config->del_vqs(vdev); | ||||
| out_free_vm: | ||||
| @ -2657,9 +2768,8 @@ out_free_vm: | ||||
| 	return rc; | ||||
| } | ||||
| 
 | ||||
| static void virtio_mem_remove(struct virtio_device *vdev) | ||||
| static void virtio_mem_deinit_hotplug(struct virtio_mem *vm) | ||||
| { | ||||
| 	struct virtio_mem *vm = vdev->priv; | ||||
| 	unsigned long mb_id; | ||||
| 	int rc; | ||||
| 
 | ||||
| @ -2706,7 +2816,8 @@ static void virtio_mem_remove(struct virtio_device *vdev) | ||||
| 	 * away. Warn at least. | ||||
| 	 */ | ||||
| 	if (virtio_mem_has_memory_added(vm)) { | ||||
| 		dev_warn(&vdev->dev, "device still has system memory added\n"); | ||||
| 		dev_warn(&vm->vdev->dev, | ||||
| 			 "device still has system memory added\n"); | ||||
| 	} else { | ||||
| 		virtio_mem_delete_resource(vm); | ||||
| 		kfree_const(vm->resource_name); | ||||
| @ -2720,6 +2831,23 @@ static void virtio_mem_remove(struct virtio_device *vdev) | ||||
| 	} else { | ||||
| 		vfree(vm->bbm.bb_states); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| static void virtio_mem_deinit_kdump(struct virtio_mem *vm) | ||||
| { | ||||
| #ifdef CONFIG_PROC_VMCORE | ||||
| 	unregister_vmcore_cb(&vm->vmcore_cb); | ||||
| #endif /* CONFIG_PROC_VMCORE */ | ||||
| } | ||||
| 
 | ||||
| static void virtio_mem_remove(struct virtio_device *vdev) | ||||
| { | ||||
| 	struct virtio_mem *vm = vdev->priv; | ||||
| 
 | ||||
| 	if (vm->in_kdump) | ||||
| 		virtio_mem_deinit_kdump(vm); | ||||
| 	else | ||||
| 		virtio_mem_deinit_hotplug(vm); | ||||
| 
 | ||||
| 	/* reset the device and cleanup the queues */ | ||||
| 	vdev->config->reset(vdev); | ||||
| @ -2733,6 +2861,9 @@ static void virtio_mem_config_changed(struct virtio_device *vdev) | ||||
| { | ||||
| 	struct virtio_mem *vm = vdev->priv; | ||||
| 
 | ||||
| 	if (unlikely(vm->in_kdump)) | ||||
| 		return; | ||||
| 
 | ||||
| 	atomic_set(&vm->config_changed, 1); | ||||
| 	virtio_mem_retry(vm); | ||||
| } | ||||
|  | ||||
| @ -156,7 +156,7 @@ static int padzero(unsigned long elf_bss) | ||||
| #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items)) | ||||
| #define STACK_ROUND(sp, items) \ | ||||
| 	(((unsigned long) (sp - items)) &~ 15UL) | ||||
| #define STACK_ALLOC(sp, len) ({ sp -= len ; sp; }) | ||||
| #define STACK_ALLOC(sp, len) (sp -= len) | ||||
| #endif | ||||
| 
 | ||||
| #ifndef ELF_BASE_PLATFORM | ||||
| @ -1074,20 +1074,26 @@ out_free_interp: | ||||
| 
 | ||||
| 		vaddr = elf_ppnt->p_vaddr; | ||||
| 		/*
 | ||||
| 		 * If we are loading ET_EXEC or we have already performed | ||||
| 		 * the ET_DYN load_addr calculations, proceed normally. | ||||
| 		 * The first time through the loop, load_addr_set is false: | ||||
| 		 * layout will be calculated. Once set, use MAP_FIXED since | ||||
| 		 * we know we've already safely mapped the entire region with | ||||
| 		 * MAP_FIXED_NOREPLACE in the once-per-binary logic following. | ||||
| 		 */ | ||||
| 		if (elf_ex->e_type == ET_EXEC || load_addr_set) { | ||||
| 		if (load_addr_set) { | ||||
| 			elf_flags |= MAP_FIXED; | ||||
| 		} else if (elf_ex->e_type == ET_EXEC) { | ||||
| 			/*
 | ||||
| 			 * This logic is run once for the first LOAD Program | ||||
| 			 * Header for ET_EXEC binaries. No special handling | ||||
| 			 * is needed. | ||||
| 			 */ | ||||
| 			elf_flags |= MAP_FIXED_NOREPLACE; | ||||
| 		} else if (elf_ex->e_type == ET_DYN) { | ||||
| 			/*
 | ||||
| 			 * This logic is run once for the first LOAD Program | ||||
| 			 * Header for ET_DYN binaries to calculate the | ||||
| 			 * randomization (load_bias) for all the LOAD | ||||
| 			 * Program Headers, and to calculate the entire | ||||
| 			 * size of the ELF mapping (total_size). (Note that | ||||
| 			 * load_addr_set is set to true later once the | ||||
| 			 * initial mapping is performed.) | ||||
| 			 * Program Headers. | ||||
| 			 * | ||||
| 			 * There are effectively two types of ET_DYN | ||||
| 			 * binaries: programs (i.e. PIE: ET_DYN with INTERP) | ||||
| @ -1108,7 +1114,7 @@ out_free_interp: | ||||
| 			 * Therefore, programs are loaded offset from | ||||
| 			 * ELF_ET_DYN_BASE and loaders are loaded into the | ||||
| 			 * independently randomized mmap region (0 load_bias | ||||
| 			 * without MAP_FIXED). | ||||
| 			 * without MAP_FIXED nor MAP_FIXED_NOREPLACE). | ||||
| 			 */ | ||||
| 			if (interpreter) { | ||||
| 				load_bias = ELF_ET_DYN_BASE; | ||||
| @ -1117,7 +1123,7 @@ out_free_interp: | ||||
| 				alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum); | ||||
| 				if (alignment) | ||||
| 					load_bias &= ~(alignment - 1); | ||||
| 				elf_flags |= MAP_FIXED; | ||||
| 				elf_flags |= MAP_FIXED_NOREPLACE; | ||||
| 			} else | ||||
| 				load_bias = 0; | ||||
| 
 | ||||
| @ -1129,7 +1135,14 @@ out_free_interp: | ||||
| 			 * is then page aligned. | ||||
| 			 */ | ||||
| 			load_bias = ELF_PAGESTART(load_bias - vaddr); | ||||
| 		} | ||||
| 
 | ||||
| 		/*
 | ||||
| 		 * Calculate the entire size of the ELF mapping (total_size). | ||||
| 		 * (Note that load_addr_set is set to true later once the | ||||
| 		 * initial mapping is performed.) | ||||
| 		 */ | ||||
| 		if (!load_addr_set) { | ||||
| 			total_size = total_mapping_size(elf_phdata, | ||||
| 							elf_ex->e_phnum); | ||||
| 			if (!total_size) { | ||||
|  | ||||
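The loader hunks above switch the ELF mapping paths from MAP_FIXED to MAP_FIXED_NOREPLACE, so an unexpected overlap fails instead of silently replacing an existing mapping. A small userspace illustration of that semantic difference follows; the fallback define matches the UAPI value and is only needed with older libc headers, and kernels predating the flag (before 4.17) simply ignore it and treat the address as a hint.

/*
 * Userspace illustration of MAP_FIXED_NOREPLACE: unlike MAP_FIXED, a
 * request that overlaps an existing mapping fails with EEXIST instead
 * of clobbering it.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MAP_FIXED_NOREPLACE
#define MAP_FIXED_NOREPLACE 0x100000
#endif

int main(void)
{
	size_t len = 4096;
	void *first = mmap(NULL, len, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	void *second;

	if (first == MAP_FAILED)
		return 1;

	/* Ask for the exact address we already occupy, without clobbering. */
	second = mmap(first, len, PROT_READ | PROT_WRITE,
		      MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE,
		      -1, 0);

	if (second == MAP_FAILED)
		printf("overlap rejected: %s\n", strerror(errno)); /* EEXIST */
	else
		printf("flag not honored (old kernel), mapped at %p\n", second);

	munmap(first, len);
	return 0;
}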
| @ -63,9 +63,10 @@ struct inode * coda_iget(struct super_block * sb, struct CodaFid * fid, | ||||
| 	struct inode *inode; | ||||
| 	struct coda_inode_info *cii; | ||||
| 	unsigned long hash = coda_f2i(fid); | ||||
| 	umode_t inode_type = coda_inode_type(attr); | ||||
| 
 | ||||
| retry: | ||||
| 	inode = iget5_locked(sb, hash, coda_test_inode, coda_set_inode, fid); | ||||
| 
 | ||||
| 	if (!inode) | ||||
| 		return ERR_PTR(-ENOMEM); | ||||
| 
 | ||||
| @ -75,11 +76,15 @@ struct inode * coda_iget(struct super_block * sb, struct CodaFid * fid, | ||||
| 		inode->i_ino = hash; | ||||
| 		/* inode is locked and unique, no need to grab cii->c_lock */ | ||||
| 		cii->c_mapcount = 0; | ||||
| 		coda_fill_inode(inode, attr); | ||||
| 		unlock_new_inode(inode); | ||||
| 	} else if ((inode->i_mode & S_IFMT) != inode_type) { | ||||
| 		/* Inode has changed type, mark bad and grab a new one */ | ||||
| 		remove_inode_hash(inode); | ||||
| 		coda_flag_inode(inode, C_PURGE); | ||||
| 		iput(inode); | ||||
| 		goto retry; | ||||
| 	} | ||||
| 
 | ||||
| 	/* always replace the attributes, type might have changed */ | ||||
| 	coda_fill_inode(inode, attr); | ||||
| 	return inode; | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -87,28 +87,27 @@ static struct coda_timespec timespec64_to_coda(struct timespec64 ts64) | ||||
| } | ||||
| 
 | ||||
| /* utility functions below */ | ||||
| umode_t coda_inode_type(struct coda_vattr *attr) | ||||
| { | ||||
| 	switch (attr->va_type) { | ||||
| 	case C_VREG: | ||||
| 		return S_IFREG; | ||||
| 	case C_VDIR: | ||||
| 		return S_IFDIR; | ||||
| 	case C_VLNK: | ||||
| 		return S_IFLNK; | ||||
| 	case C_VNON: | ||||
| 	default: | ||||
| 		return 0; | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| void coda_vattr_to_iattr(struct inode *inode, struct coda_vattr *attr) | ||||
| { | ||||
|         int inode_type; | ||||
|         /* inode's i_flags, i_ino are set by iget 
 | ||||
|            XXX: is this all we need ?? | ||||
|            */ | ||||
|         switch (attr->va_type) { | ||||
|         case C_VNON: | ||||
|                 inode_type  = 0; | ||||
|                 break; | ||||
|         case C_VREG: | ||||
|                 inode_type = S_IFREG; | ||||
|                 break; | ||||
|         case C_VDIR: | ||||
|                 inode_type = S_IFDIR; | ||||
|                 break; | ||||
|         case C_VLNK: | ||||
|                 inode_type = S_IFLNK; | ||||
|                 break; | ||||
|         default: | ||||
|                 inode_type = 0; | ||||
|         } | ||||
| 	/* inode's i_flags, i_ino are set by iget
 | ||||
| 	 * XXX: is this all we need ?? | ||||
| 	 */ | ||||
| 	umode_t inode_type = coda_inode_type(attr); | ||||
| 	inode->i_mode |= inode_type; | ||||
| 
 | ||||
| 	if (attr->va_mode != (u_short) -1) | ||||
|  | ||||
| @ -53,10 +53,11 @@ int coda_getattr(struct user_namespace *, const struct path *, struct kstat *, | ||||
| 		 u32, unsigned int); | ||||
| int coda_setattr(struct user_namespace *, struct dentry *, struct iattr *); | ||||
| 
 | ||||
| /* this file:  heloers */ | ||||
| /* this file:  helpers */ | ||||
| char *coda_f2s(struct CodaFid *f); | ||||
| int coda_iscontrol(const char *name, size_t length); | ||||
| 
 | ||||
| umode_t coda_inode_type(struct coda_vattr *attr); | ||||
| void coda_vattr_to_iattr(struct inode *, struct coda_vattr *); | ||||
| void coda_iattr_to_vattr(struct iattr *, struct coda_vattr *); | ||||
| unsigned short coda_flags_to_cflags(unsigned short); | ||||
| @ -83,6 +84,9 @@ static __inline__ void coda_flag_inode(struct inode *inode, int flag) | ||||
| { | ||||
| 	struct coda_inode_info *cii = ITOC(inode); | ||||
| 
 | ||||
| 	if (!inode) | ||||
| 		return; | ||||
| 
 | ||||
| 	spin_lock(&cii->c_lock); | ||||
| 	cii->c_flags |= flag; | ||||
| 	spin_unlock(&cii->c_lock); | ||||
|  | ||||
| @ -317,13 +317,10 @@ static int coda_rename(struct user_namespace *mnt_userns, struct inode *old_dir, | ||||
| 				coda_dir_drop_nlink(old_dir); | ||||
| 				coda_dir_inc_nlink(new_dir); | ||||
| 			} | ||||
| 			coda_dir_update_mtime(old_dir); | ||||
| 			coda_dir_update_mtime(new_dir); | ||||
| 			coda_flag_inode(d_inode(new_dentry), C_VATTR); | ||||
| 		} else { | ||||
| 			coda_flag_inode(old_dir, C_VATTR); | ||||
| 			coda_flag_inode(new_dir, C_VATTR); | ||||
| 		} | ||||
| 		coda_dir_update_mtime(old_dir); | ||||
| 		coda_dir_update_mtime(new_dir); | ||||
| 	} | ||||
| 	return error; | ||||
| } | ||||
| @ -499,15 +496,20 @@ out: | ||||
|  */ | ||||
| static int coda_dentry_delete(const struct dentry * dentry) | ||||
| { | ||||
| 	int flags; | ||||
| 	struct inode *inode; | ||||
| 	struct coda_inode_info *cii; | ||||
| 
 | ||||
| 	if (d_really_is_negative(dentry))  | ||||
| 		return 0; | ||||
| 
 | ||||
| 	flags = (ITOC(d_inode(dentry))->c_flags) & C_PURGE; | ||||
| 	if (is_bad_inode(d_inode(dentry)) || flags) { | ||||
| 	inode = d_inode(dentry); | ||||
| 	if (!inode || is_bad_inode(inode)) | ||||
| 		return 1; | ||||
| 	} | ||||
| 
 | ||||
| 	cii = ITOC(inode); | ||||
| 	if (cii->c_flags & C_PURGE) | ||||
| 		return 1; | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -8,6 +8,7 @@ | ||||
|  * to the Coda project. Contact Peter Braam <coda@cs.cmu.edu>. | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/refcount.h> | ||||
| #include <linux/types.h> | ||||
| #include <linux/kernel.h> | ||||
| #include <linux/time.h> | ||||
| @ -28,7 +29,7 @@ | ||||
| #include "coda_int.h" | ||||
| 
 | ||||
| struct coda_vm_ops { | ||||
| 	atomic_t refcnt; | ||||
| 	refcount_t refcnt; | ||||
| 	struct file *coda_file; | ||||
| 	const struct vm_operations_struct *host_vm_ops; | ||||
| 	struct vm_operations_struct vm_ops; | ||||
| @ -98,7 +99,7 @@ coda_vm_open(struct vm_area_struct *vma) | ||||
| 	struct coda_vm_ops *cvm_ops = | ||||
| 		container_of(vma->vm_ops, struct coda_vm_ops, vm_ops); | ||||
| 
 | ||||
| 	atomic_inc(&cvm_ops->refcnt); | ||||
| 	refcount_inc(&cvm_ops->refcnt); | ||||
| 
 | ||||
| 	if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->open) | ||||
| 		cvm_ops->host_vm_ops->open(vma); | ||||
| @ -113,7 +114,7 @@ coda_vm_close(struct vm_area_struct *vma) | ||||
| 	if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->close) | ||||
| 		cvm_ops->host_vm_ops->close(vma); | ||||
| 
 | ||||
| 	if (atomic_dec_and_test(&cvm_ops->refcnt)) { | ||||
| 	if (refcount_dec_and_test(&cvm_ops->refcnt)) { | ||||
| 		vma->vm_ops = cvm_ops->host_vm_ops; | ||||
| 		fput(cvm_ops->coda_file); | ||||
| 		kfree(cvm_ops); | ||||
| @ -189,7 +190,7 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma) | ||||
| 		cvm_ops->vm_ops.open = coda_vm_open; | ||||
| 		cvm_ops->vm_ops.close = coda_vm_close; | ||||
| 		cvm_ops->coda_file = coda_file; | ||||
| 		atomic_set(&cvm_ops->refcnt, 1); | ||||
| 		refcount_set(&cvm_ops->refcnt, 1); | ||||
| 
 | ||||
| 		vma->vm_ops = &cvm_ops->vm_ops; | ||||
| 	} | ||||
| @ -238,11 +239,10 @@ int coda_release(struct inode *coda_inode, struct file *coda_file) | ||||
| 	struct coda_file_info *cfi; | ||||
| 	struct coda_inode_info *cii; | ||||
| 	struct inode *host_inode; | ||||
| 	int err; | ||||
| 
 | ||||
| 	cfi = coda_ftoc(coda_file); | ||||
| 
 | ||||
| 	err = venus_close(coda_inode->i_sb, coda_i2f(coda_inode), | ||||
| 	venus_close(coda_inode->i_sb, coda_i2f(coda_inode), | ||||
| 			  coda_flags, coda_file->f_cred->fsuid); | ||||
| 
 | ||||
| 	host_inode = file_inode(cfi->cfi_container); | ||||
|  | ||||
| @ -122,14 +122,10 @@ static ssize_t coda_psdev_write(struct file *file, const char __user *buf, | ||||
| 				hdr.opcode, hdr.unique); | ||||
| 		        nbytes = size; | ||||
| 		} | ||||
| 		dcbuf = kvmalloc(nbytes, GFP_KERNEL); | ||||
| 		if (!dcbuf) { | ||||
| 			retval = -ENOMEM; | ||||
| 			goto out; | ||||
| 		} | ||||
| 		if (copy_from_user(dcbuf, buf, nbytes)) { | ||||
| 			kvfree(dcbuf); | ||||
| 			retval = -EFAULT; | ||||
| 
 | ||||
| 		dcbuf = vmemdup_user(buf, nbytes); | ||||
| 		if (IS_ERR(dcbuf)) { | ||||
| 			retval = PTR_ERR(dcbuf); | ||||
| 			goto out; | ||||
| 		} | ||||
| 
 | ||||
| @ -388,7 +384,7 @@ MODULE_AUTHOR("Jan Harkes, Peter J. Braam"); | ||||
| MODULE_DESCRIPTION("Coda Distributed File System VFS interface"); | ||||
| MODULE_ALIAS_CHARDEV_MAJOR(CODA_PSDEV_MAJOR); | ||||
| MODULE_LICENSE("GPL"); | ||||
| MODULE_VERSION("7.0"); | ||||
| MODULE_VERSION("7.2"); | ||||
| 
 | ||||
| static int __init init_coda(void) | ||||
| { | ||||
|  | ||||
| @ -744,7 +744,8 @@ static int coda_upcall(struct venus_comm *vcp, | ||||
| 	list_add_tail(&req->uc_chain, &vcp->vc_pending); | ||||
| 	wake_up_interruptible(&vcp->vc_waitq); | ||||
| 
 | ||||
| 	if (req->uc_flags & CODA_REQ_ASYNC) { | ||||
| 	/* We can return early on asynchronous requests */ | ||||
| 	if (outSize == NULL) { | ||||
| 		mutex_unlock(&vcp->vc_mutex); | ||||
| 		return 0; | ||||
| 	} | ||||
|  | ||||
| @ -462,8 +462,7 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc) | ||||
| 		goto out; | ||||
| 
 | ||||
| 	if (S_ISDIR(main_inode->i_mode)) { | ||||
| 		if (fd.entrylength < sizeof(struct hfs_cat_dir)) | ||||
| 			/* panic? */; | ||||
| 		WARN_ON(fd.entrylength < sizeof(struct hfs_cat_dir)); | ||||
| 		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, | ||||
| 			   sizeof(struct hfs_cat_dir)); | ||||
| 		if (rec.type != HFS_CDR_DIR || | ||||
| @ -483,8 +482,7 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc) | ||||
| 		hfs_bnode_write(fd.bnode, &rec, fd.entryoffset, | ||||
| 				sizeof(struct hfs_cat_file)); | ||||
| 	} else { | ||||
| 		if (fd.entrylength < sizeof(struct hfs_cat_file)) | ||||
| 			/* panic? */; | ||||
| 		WARN_ON(fd.entrylength < sizeof(struct hfs_cat_file)); | ||||
| 		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, | ||||
| 			   sizeof(struct hfs_cat_file)); | ||||
| 		if (rec.type != HFS_CDR_FIL || | ||||
|  | ||||
| @ -509,8 +509,7 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd) | ||||
| 	if (type == HFSPLUS_FOLDER) { | ||||
| 		struct hfsplus_cat_folder *folder = &entry.folder; | ||||
| 
 | ||||
| 		if (fd->entrylength < sizeof(struct hfsplus_cat_folder)) | ||||
| 			/* panic? */; | ||||
| 		WARN_ON(fd->entrylength < sizeof(struct hfsplus_cat_folder)); | ||||
| 		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset, | ||||
| 					sizeof(struct hfsplus_cat_folder)); | ||||
| 		hfsplus_get_perms(inode, &folder->permissions, 1); | ||||
| @ -530,8 +529,7 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd) | ||||
| 	} else if (type == HFSPLUS_FILE) { | ||||
| 		struct hfsplus_cat_file *file = &entry.file; | ||||
| 
 | ||||
| 		if (fd->entrylength < sizeof(struct hfsplus_cat_file)) | ||||
| 			/* panic? */; | ||||
| 		WARN_ON(fd->entrylength < sizeof(struct hfsplus_cat_file)); | ||||
| 		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset, | ||||
| 					sizeof(struct hfsplus_cat_file)); | ||||
| 
 | ||||
| @ -588,8 +586,7 @@ int hfsplus_cat_write_inode(struct inode *inode) | ||||
| 	if (S_ISDIR(main_inode->i_mode)) { | ||||
| 		struct hfsplus_cat_folder *folder = &entry.folder; | ||||
| 
 | ||||
| 		if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) | ||||
| 			/* panic? */; | ||||
| 		WARN_ON(fd.entrylength < sizeof(struct hfsplus_cat_folder)); | ||||
| 		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, | ||||
| 					sizeof(struct hfsplus_cat_folder)); | ||||
| 		/* simple node checks? */ | ||||
| @ -614,8 +611,7 @@ int hfsplus_cat_write_inode(struct inode *inode) | ||||
| 	} else { | ||||
| 		struct hfsplus_cat_file *file = &entry.file; | ||||
| 
 | ||||
| 		if (fd.entrylength < sizeof(struct hfsplus_cat_file)) | ||||
| 			/* panic? */; | ||||
| 		WARN_ON(fd.entrylength < sizeof(struct hfsplus_cat_file)); | ||||
| 		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, | ||||
| 					sizeof(struct hfsplus_cat_file)); | ||||
| 		hfsplus_inode_write_fork(inode, &file->data_fork); | ||||
|  | ||||
| @ -1446,8 +1446,8 @@ static int get_hstate_idx(int page_size_log) | ||||
|  * otherwise hugetlb_reserve_pages reserves one less hugepages than intended. | ||||
|  */ | ||||
| struct file *hugetlb_file_setup(const char *name, size_t size, | ||||
| 				vm_flags_t acctflag, struct ucounts **ucounts, | ||||
| 				int creat_flags, int page_size_log) | ||||
| 				vm_flags_t acctflag, int creat_flags, | ||||
| 				int page_size_log) | ||||
| { | ||||
| 	struct inode *inode; | ||||
| 	struct vfsmount *mnt; | ||||
| @ -1458,22 +1458,19 @@ struct file *hugetlb_file_setup(const char *name, size_t size, | ||||
| 	if (hstate_idx < 0) | ||||
| 		return ERR_PTR(-ENODEV); | ||||
| 
 | ||||
| 	*ucounts = NULL; | ||||
| 	mnt = hugetlbfs_vfsmount[hstate_idx]; | ||||
| 	if (!mnt) | ||||
| 		return ERR_PTR(-ENOENT); | ||||
| 
 | ||||
| 	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) { | ||||
| 		*ucounts = current_ucounts(); | ||||
| 		if (user_shm_lock(size, *ucounts)) { | ||||
| 			task_lock(current); | ||||
| 			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n", | ||||
| 		struct ucounts *ucounts = current_ucounts(); | ||||
| 
 | ||||
| 		if (user_shm_lock(size, ucounts)) { | ||||
| 			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is obsolete\n", | ||||
| 				current->comm, current->pid); | ||||
| 			task_unlock(current); | ||||
| 		} else { | ||||
| 			*ucounts = NULL; | ||||
| 			return ERR_PTR(-EPERM); | ||||
| 			user_shm_unlock(size, ucounts); | ||||
| 		} | ||||
| 		return ERR_PTR(-EPERM); | ||||
| 	} | ||||
| 
 | ||||
| 	file = ERR_PTR(-ENOSPC); | ||||
| @ -1498,10 +1495,6 @@ struct file *hugetlb_file_setup(const char *name, size_t size, | ||||
| 
 | ||||
| 	iput(inode); | ||||
| out: | ||||
| 	if (*ucounts) { | ||||
| 		user_shm_unlock(size, *ucounts); | ||||
| 		*ucounts = NULL; | ||||
| 	} | ||||
| 	return file; | ||||
| } | ||||
| 
 | ||||
|  | ||||
							
								
								
									
fs/inode.c (46 lines changed)
							| @ -428,11 +428,20 @@ void ihold(struct inode *inode) | ||||
| } | ||||
| EXPORT_SYMBOL(ihold); | ||||
| 
 | ||||
| static void inode_lru_list_add(struct inode *inode) | ||||
| static void __inode_add_lru(struct inode *inode, bool rotate) | ||||
| { | ||||
| 	if (inode->i_state & (I_DIRTY_ALL | I_SYNC | I_FREEING | I_WILL_FREE)) | ||||
| 		return; | ||||
| 	if (atomic_read(&inode->i_count)) | ||||
| 		return; | ||||
| 	if (!(inode->i_sb->s_flags & SB_ACTIVE)) | ||||
| 		return; | ||||
| 	if (!mapping_shrinkable(&inode->i_data)) | ||||
| 		return; | ||||
| 
 | ||||
| 	if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru)) | ||||
| 		this_cpu_inc(nr_unused); | ||||
| 	else | ||||
| 	else if (rotate) | ||||
| 		inode->i_state |= I_REFERENCED; | ||||
| } | ||||
| 
 | ||||
| @ -443,16 +452,11 @@ static void inode_lru_list_add(struct inode *inode) | ||||
|  */ | ||||
| void inode_add_lru(struct inode *inode) | ||||
| { | ||||
| 	if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC | | ||||
| 				I_FREEING | I_WILL_FREE)) && | ||||
| 	    !atomic_read(&inode->i_count) && inode->i_sb->s_flags & SB_ACTIVE) | ||||
| 		inode_lru_list_add(inode); | ||||
| 	__inode_add_lru(inode, false); | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| static void inode_lru_list_del(struct inode *inode) | ||||
| { | ||||
| 
 | ||||
| 	if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru)) | ||||
| 		this_cpu_dec(nr_unused); | ||||
| } | ||||
| @ -728,10 +732,6 @@ again: | ||||
| /*
 | ||||
|  * Isolate the inode from the LRU in preparation for freeing it. | ||||
|  * | ||||
|  * Any inodes which are pinned purely because of attached pagecache have their | ||||
|  * pagecache removed.  If the inode has metadata buffers attached to | ||||
|  * mapping->private_list then try to remove them. | ||||
|  * | ||||
|  * If the inode has the I_REFERENCED flag set, then it means that it has been | ||||
|  * used recently - the flag is set in iput_final(). When we encounter such an | ||||
|  * inode, clear the flag and move it to the back of the LRU so it gets another | ||||
| @ -747,31 +747,39 @@ static enum lru_status inode_lru_isolate(struct list_head *item, | ||||
| 	struct inode	*inode = container_of(item, struct inode, i_lru); | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * we are inverting the lru lock/inode->i_lock here, so use a trylock. | ||||
| 	 * If we fail to get the lock, just skip it. | ||||
| 	 * We are inverting the lru lock/inode->i_lock here, so use a | ||||
| 	 * trylock. If we fail to get the lock, just skip it. | ||||
| 	 */ | ||||
| 	if (!spin_trylock(&inode->i_lock)) | ||||
| 		return LRU_SKIP; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Referenced or dirty inodes are still in use. Give them another pass | ||||
| 	 * through the LRU as we cannot reclaim them now. | ||||
| 	 * Inodes can get referenced, redirtied, or repopulated while | ||||
| 	 * they're already on the LRU, and this can make them | ||||
| 	 * unreclaimable for a while. Remove them lazily here; iput, | ||||
| 	 * sync, or the last page cache deletion will requeue them. | ||||
| 	 */ | ||||
| 	if (atomic_read(&inode->i_count) || | ||||
| 	    (inode->i_state & ~I_REFERENCED)) { | ||||
| 	    (inode->i_state & ~I_REFERENCED) || | ||||
| 	    !mapping_shrinkable(&inode->i_data)) { | ||||
| 		list_lru_isolate(lru, &inode->i_lru); | ||||
| 		spin_unlock(&inode->i_lock); | ||||
| 		this_cpu_dec(nr_unused); | ||||
| 		return LRU_REMOVED; | ||||
| 	} | ||||
| 
 | ||||
| 	/* recently referenced inodes get one more pass */ | ||||
| 	/* Recently referenced inodes get one more pass */ | ||||
| 	if (inode->i_state & I_REFERENCED) { | ||||
| 		inode->i_state &= ~I_REFERENCED; | ||||
| 		spin_unlock(&inode->i_lock); | ||||
| 		return LRU_ROTATE; | ||||
| 	} | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * On highmem systems, mapping_shrinkable() permits dropping | ||||
| 	 * page cache in order to free up struct inodes: lowmem might | ||||
| 	 * be under pressure before the cache inside the highmem zone. | ||||
| 	 */ | ||||
| 	if (inode_has_buffers(inode) || !mapping_empty(&inode->i_data)) { | ||||
| 		__iget(inode); | ||||
| 		spin_unlock(&inode->i_lock); | ||||
| @ -1638,7 +1646,7 @@ static void iput_final(struct inode *inode) | ||||
| 	if (!drop && | ||||
| 	    !(inode->i_state & I_DONTCACHE) && | ||||
| 	    (sb->s_flags & SB_ACTIVE)) { | ||||
| 		inode_add_lru(inode); | ||||
| 		__inode_add_lru(inode, true); | ||||
| 		spin_unlock(&inode->i_lock); | ||||
| 		return; | ||||
| 	} | ||||
|  | ||||
| @ -138,7 +138,6 @@ extern int vfs_open(const struct path *, struct file *); | ||||
|  * inode.c | ||||
|  */ | ||||
| extern long prune_icache_sb(struct super_block *sb, struct shrink_control *sc); | ||||
| extern void inode_add_lru(struct inode *inode); | ||||
| extern int dentry_needs_remove_privs(struct dentry *dentry); | ||||
| 
 | ||||
| /*
 | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| // SPDX-License-Identifier: GPL-2.0+
 | ||||
| /*
 | ||||
|  * alloc.c - NILFS dat/inode allocator | ||||
|  * NILFS dat/inode allocator | ||||
|  * | ||||
|  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| /* SPDX-License-Identifier: GPL-2.0+ */ | ||||
| /*
 | ||||
|  * alloc.h - persistent object (dat entry/disk inode) allocator/deallocator | ||||
|  * Persistent object (dat entry/disk inode) allocator/deallocator | ||||
|  * | ||||
|  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| // SPDX-License-Identifier: GPL-2.0+
 | ||||
| /*
 | ||||
|  * bmap.c - NILFS block mapping. | ||||
|  * NILFS block mapping. | ||||
|  * | ||||
|  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| /* SPDX-License-Identifier: GPL-2.0+ */ | ||||
| /*
 | ||||
|  * bmap.h - NILFS block mapping. | ||||
|  * NILFS block mapping. | ||||
|  * | ||||
|  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| // SPDX-License-Identifier: GPL-2.0+
 | ||||
| /*
 | ||||
|  * btnode.c - NILFS B-tree node cache | ||||
|  * NILFS B-tree node cache | ||||
|  * | ||||
|  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| /* SPDX-License-Identifier: GPL-2.0+ */ | ||||
| /*
 | ||||
|  * btnode.h - NILFS B-tree node cache | ||||
|  * NILFS B-tree node cache | ||||
|  * | ||||
|  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| // SPDX-License-Identifier: GPL-2.0+
 | ||||
| /*
 | ||||
|  * btree.c - NILFS B-tree. | ||||
|  * NILFS B-tree. | ||||
|  * | ||||
|  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| /* SPDX-License-Identifier: GPL-2.0+ */ | ||||
| /*
 | ||||
|  * btree.h - NILFS B-tree. | ||||
|  * NILFS B-tree. | ||||
|  * | ||||
|  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| // SPDX-License-Identifier: GPL-2.0+
 | ||||
| /*
 | ||||
|  * cpfile.c - NILFS checkpoint file. | ||||
|  * NILFS checkpoint file. | ||||
|  * | ||||
|  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| /* SPDX-License-Identifier: GPL-2.0+ */ | ||||
| /*
 | ||||
|  * cpfile.h - NILFS checkpoint file. | ||||
|  * NILFS checkpoint file. | ||||
|  * | ||||
|  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| // SPDX-License-Identifier: GPL-2.0+
 | ||||
| /*
 | ||||
|  * dat.c - NILFS disk address translation. | ||||
|  * NILFS disk address translation. | ||||
|  * | ||||
|  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| /* SPDX-License-Identifier: GPL-2.0+ */ | ||||
| /*
 | ||||
|  * dat.h - NILFS disk address translation. | ||||
|  * NILFS disk address translation. | ||||
|  * | ||||
|  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| // SPDX-License-Identifier: GPL-2.0+
 | ||||
| /*
 | ||||
|  * dir.c - NILFS directory entry operations | ||||
|  * NILFS directory entry operations | ||||
|  * | ||||
|  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| // SPDX-License-Identifier: GPL-2.0+
 | ||||
| /*
 | ||||
|  * direct.c - NILFS direct block pointer. | ||||
|  * NILFS direct block pointer. | ||||
|  * | ||||
|  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| /* SPDX-License-Identifier: GPL-2.0+ */ | ||||
| /*
 | ||||
|  * direct.h - NILFS direct block pointer. | ||||
|  * NILFS direct block pointer. | ||||
|  * | ||||
|  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| // SPDX-License-Identifier: GPL-2.0+
 | ||||
| /*
 | ||||
|  * file.c - NILFS regular file handling primitives including fsync(). | ||||
|  * NILFS regular file handling primitives including fsync(). | ||||
|  * | ||||
|  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| // SPDX-License-Identifier: GPL-2.0+
 | ||||
| /*
 | ||||
|  * gcinode.c - dummy inodes to buffer blocks for garbage collection | ||||
|  * Dummy inodes to buffer blocks for garbage collection | ||||
|  * | ||||
|  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| // SPDX-License-Identifier: GPL-2.0+
 | ||||
| /*
 | ||||
|  * ifile.c - NILFS inode file | ||||
|  * NILFS inode file | ||||
|  * | ||||
|  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| /* SPDX-License-Identifier: GPL-2.0+ */ | ||||
| /*
 | ||||
|  * ifile.h - NILFS inode file | ||||
|  * NILFS inode file | ||||
|  * | ||||
|  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| // SPDX-License-Identifier: GPL-2.0+
 | ||||
| /*
 | ||||
|  * inode.c - NILFS inode operations. | ||||
|  * NILFS inode operations. | ||||
|  * | ||||
|  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| // SPDX-License-Identifier: GPL-2.0+
 | ||||
| /*
 | ||||
|  * ioctl.c - NILFS ioctl operations. | ||||
|  * NILFS ioctl operations. | ||||
|  * | ||||
|  * Copyright (C) 2007, 2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| // SPDX-License-Identifier: GPL-2.0+
 | ||||
| /*
 | ||||
|  * mdt.c - meta data file for NILFS | ||||
|  * Meta data file for NILFS | ||||
|  * | ||||
|  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| /* SPDX-License-Identifier: GPL-2.0+ */ | ||||
| /*
 | ||||
|  * mdt.h - NILFS meta data file prototype and definitions | ||||
|  * NILFS meta data file prototype and definitions | ||||
|  * | ||||
|  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| // SPDX-License-Identifier: GPL-2.0+
 | ||||
| /*
 | ||||
|  * namei.c - NILFS pathname lookup operations. | ||||
|  * NILFS pathname lookup operations. | ||||
|  * | ||||
|  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| /* SPDX-License-Identifier: GPL-2.0+ */ | ||||
| /*
 | ||||
|  * nilfs.h - NILFS local header file. | ||||
|  * NILFS local header file. | ||||
|  * | ||||
|  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| // SPDX-License-Identifier: GPL-2.0+
 | ||||
| /*
 | ||||
|  * page.c - buffer/page management specific to NILFS | ||||
|  * Buffer/page management specific to NILFS | ||||
|  * | ||||
|  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| /* SPDX-License-Identifier: GPL-2.0+ */ | ||||
| /*
 | ||||
|  * page.h - buffer/page management specific to NILFS | ||||
|  * Buffer/page management specific to NILFS | ||||
|  * | ||||
|  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| // SPDX-License-Identifier: GPL-2.0+
 | ||||
| /*
 | ||||
|  * recovery.c - NILFS recovery logic | ||||
|  * NILFS recovery logic | ||||
|  * | ||||
|  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| // SPDX-License-Identifier: GPL-2.0+
 | ||||
| /*
 | ||||
|  * segbuf.c - NILFS segment buffer | ||||
|  * NILFS segment buffer | ||||
|  * | ||||
|  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| /* SPDX-License-Identifier: GPL-2.0+ */ | ||||
| /*
 | ||||
|  * segbuf.h - NILFS Segment buffer prototypes and definitions | ||||
|  * NILFS Segment buffer prototypes and definitions | ||||
|  * | ||||
|  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| // SPDX-License-Identifier: GPL-2.0+
 | ||||
| /*
 | ||||
|  * segment.c - NILFS segment constructor. | ||||
|  * NILFS segment constructor. | ||||
|  * | ||||
|  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| /* SPDX-License-Identifier: GPL-2.0+ */ | ||||
| /*
 | ||||
|  * segment.h - NILFS Segment constructor prototypes and definitions | ||||
|  * NILFS Segment constructor prototypes and definitions | ||||
|  * | ||||
|  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| // SPDX-License-Identifier: GPL-2.0+
 | ||||
| /*
 | ||||
|  * sufile.c - NILFS segment usage file. | ||||
|  * NILFS segment usage file. | ||||
|  * | ||||
|  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| /* SPDX-License-Identifier: GPL-2.0+ */ | ||||
| /*
 | ||||
|  * sufile.h - NILFS segment usage file. | ||||
|  * NILFS segment usage file. | ||||
|  * | ||||
|  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| // SPDX-License-Identifier: GPL-2.0+
 | ||||
| /*
 | ||||
|  * super.c - NILFS module and super block management. | ||||
|  * NILFS module and super block management. | ||||
|  * | ||||
|  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| // SPDX-License-Identifier: GPL-2.0+
 | ||||
| /*
 | ||||
|  * sysfs.c - sysfs support implementation. | ||||
|  * Sysfs support implementation. | ||||
|  * | ||||
|  * Copyright (C) 2005-2014 Nippon Telegraph and Telephone Corporation. | ||||
|  * Copyright (C) 2014 HGST, Inc., a Western Digital Company. | ||||
| @ -95,7 +95,7 @@ static ssize_t | ||||
| nilfs_snapshot_inodes_count_show(struct nilfs_snapshot_attr *attr, | ||||
| 				 struct nilfs_root *root, char *buf) | ||||
| { | ||||
| 	return snprintf(buf, PAGE_SIZE, "%llu\n", | ||||
| 	return sysfs_emit(buf, "%llu\n", | ||||
| 			(unsigned long long)atomic64_read(&root->inodes_count)); | ||||
| } | ||||
| 
 | ||||
| @ -103,7 +103,7 @@ static ssize_t | ||||
| nilfs_snapshot_blocks_count_show(struct nilfs_snapshot_attr *attr, | ||||
| 				 struct nilfs_root *root, char *buf) | ||||
| { | ||||
| 	return snprintf(buf, PAGE_SIZE, "%llu\n", | ||||
| 	return sysfs_emit(buf, "%llu\n", | ||||
| 			(unsigned long long)atomic64_read(&root->blocks_count)); | ||||
| } | ||||
| 
 | ||||
| @ -116,7 +116,7 @@ static ssize_t | ||||
| nilfs_snapshot_README_show(struct nilfs_snapshot_attr *attr, | ||||
| 			    struct nilfs_root *root, char *buf) | ||||
| { | ||||
| 	return snprintf(buf, PAGE_SIZE, snapshot_readme_str); | ||||
| 	return sysfs_emit(buf, snapshot_readme_str); | ||||
| } | ||||
| 
 | ||||
| NILFS_SNAPSHOT_RO_ATTR(inodes_count); | ||||
| @ -217,7 +217,7 @@ static ssize_t | ||||
| nilfs_mounted_snapshots_README_show(struct nilfs_mounted_snapshots_attr *attr, | ||||
| 				    struct the_nilfs *nilfs, char *buf) | ||||
| { | ||||
| 	return snprintf(buf, PAGE_SIZE, mounted_snapshots_readme_str); | ||||
| 	return sysfs_emit(buf, mounted_snapshots_readme_str); | ||||
| } | ||||
| 
 | ||||
| NILFS_MOUNTED_SNAPSHOTS_RO_ATTR(README); | ||||
| @ -255,7 +255,7 @@ nilfs_checkpoints_checkpoints_number_show(struct nilfs_checkpoints_attr *attr, | ||||
| 
 | ||||
| 	ncheckpoints = cpstat.cs_ncps; | ||||
| 
 | ||||
| 	return snprintf(buf, PAGE_SIZE, "%llu\n", ncheckpoints); | ||||
| 	return sysfs_emit(buf, "%llu\n", ncheckpoints); | ||||
| } | ||||
| 
 | ||||
| static ssize_t | ||||
| @ -278,7 +278,7 @@ nilfs_checkpoints_snapshots_number_show(struct nilfs_checkpoints_attr *attr, | ||||
| 
 | ||||
| 	nsnapshots = cpstat.cs_nsss; | ||||
| 
 | ||||
| 	return snprintf(buf, PAGE_SIZE, "%llu\n", nsnapshots); | ||||
| 	return sysfs_emit(buf, "%llu\n", nsnapshots); | ||||
| } | ||||
| 
 | ||||
| static ssize_t | ||||
| @ -292,7 +292,7 @@ nilfs_checkpoints_last_seg_checkpoint_show(struct nilfs_checkpoints_attr *attr, | ||||
| 	last_cno = nilfs->ns_last_cno; | ||||
| 	spin_unlock(&nilfs->ns_last_segment_lock); | ||||
| 
 | ||||
| 	return snprintf(buf, PAGE_SIZE, "%llu\n", last_cno); | ||||
| 	return sysfs_emit(buf, "%llu\n", last_cno); | ||||
| } | ||||
| 
 | ||||
| static ssize_t | ||||
| @ -306,7 +306,7 @@ nilfs_checkpoints_next_checkpoint_show(struct nilfs_checkpoints_attr *attr, | ||||
| 	cno = nilfs->ns_cno; | ||||
| 	up_read(&nilfs->ns_segctor_sem); | ||||
| 
 | ||||
| 	return snprintf(buf, PAGE_SIZE, "%llu\n", cno); | ||||
| 	return sysfs_emit(buf, "%llu\n", cno); | ||||
| } | ||||
| 
 | ||||
| static const char checkpoints_readme_str[] = | ||||
| @ -322,7 +322,7 @@ static ssize_t | ||||
| nilfs_checkpoints_README_show(struct nilfs_checkpoints_attr *attr, | ||||
| 				struct the_nilfs *nilfs, char *buf) | ||||
| { | ||||
| 	return snprintf(buf, PAGE_SIZE, checkpoints_readme_str); | ||||
| 	return sysfs_emit(buf, checkpoints_readme_str); | ||||
| } | ||||
| 
 | ||||
| NILFS_CHECKPOINTS_RO_ATTR(checkpoints_number); | ||||
| @ -353,7 +353,7 @@ nilfs_segments_segments_number_show(struct nilfs_segments_attr *attr, | ||||
| 				     struct the_nilfs *nilfs, | ||||
| 				     char *buf) | ||||
| { | ||||
| 	return snprintf(buf, PAGE_SIZE, "%lu\n", nilfs->ns_nsegments); | ||||
| 	return sysfs_emit(buf, "%lu\n", nilfs->ns_nsegments); | ||||
| } | ||||
| 
 | ||||
| static ssize_t | ||||
| @ -361,7 +361,7 @@ nilfs_segments_blocks_per_segment_show(struct nilfs_segments_attr *attr, | ||||
| 					struct the_nilfs *nilfs, | ||||
| 					char *buf) | ||||
| { | ||||
| 	return snprintf(buf, PAGE_SIZE, "%lu\n", nilfs->ns_blocks_per_segment); | ||||
| 	return sysfs_emit(buf, "%lu\n", nilfs->ns_blocks_per_segment); | ||||
| } | ||||
| 
 | ||||
| static ssize_t | ||||
| @ -375,7 +375,7 @@ nilfs_segments_clean_segments_show(struct nilfs_segments_attr *attr, | ||||
| 	ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile); | ||||
| 	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem); | ||||
| 
 | ||||
| 	return snprintf(buf, PAGE_SIZE, "%lu\n", ncleansegs); | ||||
| 	return sysfs_emit(buf, "%lu\n", ncleansegs); | ||||
| } | ||||
| 
 | ||||
| static ssize_t | ||||
| @ -395,7 +395,7 @@ nilfs_segments_dirty_segments_show(struct nilfs_segments_attr *attr, | ||||
| 		return err; | ||||
| 	} | ||||
| 
 | ||||
| 	return snprintf(buf, PAGE_SIZE, "%llu\n", sustat.ss_ndirtysegs); | ||||
| 	return sysfs_emit(buf, "%llu\n", sustat.ss_ndirtysegs); | ||||
| } | ||||
| 
 | ||||
| static const char segments_readme_str[] = | ||||
| @ -411,7 +411,7 @@ nilfs_segments_README_show(struct nilfs_segments_attr *attr, | ||||
| 			    struct the_nilfs *nilfs, | ||||
| 			    char *buf) | ||||
| { | ||||
| 	return snprintf(buf, PAGE_SIZE, segments_readme_str); | ||||
| 	return sysfs_emit(buf, segments_readme_str); | ||||
| } | ||||
| 
 | ||||
| NILFS_SEGMENTS_RO_ATTR(segments_number); | ||||
| @ -448,7 +448,7 @@ nilfs_segctor_last_pseg_block_show(struct nilfs_segctor_attr *attr, | ||||
| 	last_pseg = nilfs->ns_last_pseg; | ||||
| 	spin_unlock(&nilfs->ns_last_segment_lock); | ||||
| 
 | ||||
| 	return snprintf(buf, PAGE_SIZE, "%llu\n", | ||||
| 	return sysfs_emit(buf, "%llu\n", | ||||
| 			(unsigned long long)last_pseg); | ||||
| } | ||||
| 
 | ||||
| @ -463,7 +463,7 @@ nilfs_segctor_last_seg_sequence_show(struct nilfs_segctor_attr *attr, | ||||
| 	last_seq = nilfs->ns_last_seq; | ||||
| 	spin_unlock(&nilfs->ns_last_segment_lock); | ||||
| 
 | ||||
| 	return snprintf(buf, PAGE_SIZE, "%llu\n", last_seq); | ||||
| 	return sysfs_emit(buf, "%llu\n", last_seq); | ||||
| } | ||||
| 
 | ||||
| static ssize_t | ||||
| @ -477,7 +477,7 @@ nilfs_segctor_last_seg_checkpoint_show(struct nilfs_segctor_attr *attr, | ||||
| 	last_cno = nilfs->ns_last_cno; | ||||
| 	spin_unlock(&nilfs->ns_last_segment_lock); | ||||
| 
 | ||||
| 	return snprintf(buf, PAGE_SIZE, "%llu\n", last_cno); | ||||
| 	return sysfs_emit(buf, "%llu\n", last_cno); | ||||
| } | ||||
| 
 | ||||
| static ssize_t | ||||
| @ -491,7 +491,7 @@ nilfs_segctor_current_seg_sequence_show(struct nilfs_segctor_attr *attr, | ||||
| 	seg_seq = nilfs->ns_seg_seq; | ||||
| 	up_read(&nilfs->ns_segctor_sem); | ||||
| 
 | ||||
| 	return snprintf(buf, PAGE_SIZE, "%llu\n", seg_seq); | ||||
| 	return sysfs_emit(buf, "%llu\n", seg_seq); | ||||
| } | ||||
| 
 | ||||
| static ssize_t | ||||
| @ -505,7 +505,7 @@ nilfs_segctor_current_last_full_seg_show(struct nilfs_segctor_attr *attr, | ||||
| 	segnum = nilfs->ns_segnum; | ||||
| 	up_read(&nilfs->ns_segctor_sem); | ||||
| 
 | ||||
| 	return snprintf(buf, PAGE_SIZE, "%llu\n", segnum); | ||||
| 	return sysfs_emit(buf, "%llu\n", segnum); | ||||
| } | ||||
| 
 | ||||
| static ssize_t | ||||
| @ -519,7 +519,7 @@ nilfs_segctor_next_full_seg_show(struct nilfs_segctor_attr *attr, | ||||
| 	nextnum = nilfs->ns_nextnum; | ||||
| 	up_read(&nilfs->ns_segctor_sem); | ||||
| 
 | ||||
| 	return snprintf(buf, PAGE_SIZE, "%llu\n", nextnum); | ||||
| 	return sysfs_emit(buf, "%llu\n", nextnum); | ||||
| } | ||||
| 
 | ||||
| static ssize_t | ||||
| @ -533,7 +533,7 @@ nilfs_segctor_next_pseg_offset_show(struct nilfs_segctor_attr *attr, | ||||
| 	pseg_offset = nilfs->ns_pseg_offset; | ||||
| 	up_read(&nilfs->ns_segctor_sem); | ||||
| 
 | ||||
| 	return snprintf(buf, PAGE_SIZE, "%lu\n", pseg_offset); | ||||
| 	return sysfs_emit(buf, "%lu\n", pseg_offset); | ||||
| } | ||||
| 
 | ||||
| static ssize_t | ||||
| @ -547,7 +547,7 @@ nilfs_segctor_next_checkpoint_show(struct nilfs_segctor_attr *attr, | ||||
| 	cno = nilfs->ns_cno; | ||||
| 	up_read(&nilfs->ns_segctor_sem); | ||||
| 
 | ||||
| 	return snprintf(buf, PAGE_SIZE, "%llu\n", cno); | ||||
| 	return sysfs_emit(buf, "%llu\n", cno); | ||||
| } | ||||
| 
 | ||||
| static ssize_t | ||||
| @ -575,7 +575,7 @@ nilfs_segctor_last_seg_write_time_secs_show(struct nilfs_segctor_attr *attr, | ||||
| 	ctime = nilfs->ns_ctime; | ||||
| 	up_read(&nilfs->ns_segctor_sem); | ||||
| 
 | ||||
| 	return snprintf(buf, PAGE_SIZE, "%llu\n", ctime); | ||||
| 	return sysfs_emit(buf, "%llu\n", ctime); | ||||
| } | ||||
| 
 | ||||
| static ssize_t | ||||
| @ -603,7 +603,7 @@ nilfs_segctor_last_nongc_write_time_secs_show(struct nilfs_segctor_attr *attr, | ||||
| 	nongc_ctime = nilfs->ns_nongc_ctime; | ||||
| 	up_read(&nilfs->ns_segctor_sem); | ||||
| 
 | ||||
| 	return snprintf(buf, PAGE_SIZE, "%llu\n", nongc_ctime); | ||||
| 	return sysfs_emit(buf, "%llu\n", nongc_ctime); | ||||
| } | ||||
| 
 | ||||
| static ssize_t | ||||
| @ -617,7 +617,7 @@ nilfs_segctor_dirty_data_blocks_count_show(struct nilfs_segctor_attr *attr, | ||||
| 	ndirtyblks = atomic_read(&nilfs->ns_ndirtyblks); | ||||
| 	up_read(&nilfs->ns_segctor_sem); | ||||
| 
 | ||||
| 	return snprintf(buf, PAGE_SIZE, "%u\n", ndirtyblks); | ||||
| 	return sysfs_emit(buf, "%u\n", ndirtyblks); | ||||
| } | ||||
| 
 | ||||
| static const char segctor_readme_str[] = | ||||
| @ -654,7 +654,7 @@ static ssize_t | ||||
| nilfs_segctor_README_show(struct nilfs_segctor_attr *attr, | ||||
| 			  struct the_nilfs *nilfs, char *buf) | ||||
| { | ||||
| 	return snprintf(buf, PAGE_SIZE, segctor_readme_str); | ||||
| 	return sysfs_emit(buf, segctor_readme_str); | ||||
| } | ||||
| 
 | ||||
| NILFS_SEGCTOR_RO_ATTR(last_pseg_block); | ||||
| @ -723,7 +723,7 @@ nilfs_superblock_sb_write_time_secs_show(struct nilfs_superblock_attr *attr, | ||||
| 	sbwtime = nilfs->ns_sbwtime; | ||||
| 	up_read(&nilfs->ns_sem); | ||||
| 
 | ||||
| 	return snprintf(buf, PAGE_SIZE, "%llu\n", sbwtime); | ||||
| 	return sysfs_emit(buf, "%llu\n", sbwtime); | ||||
| } | ||||
| 
 | ||||
| static ssize_t | ||||
| @ -737,7 +737,7 @@ nilfs_superblock_sb_write_count_show(struct nilfs_superblock_attr *attr, | ||||
| 	sbwcount = nilfs->ns_sbwcount; | ||||
| 	up_read(&nilfs->ns_sem); | ||||
| 
 | ||||
| 	return snprintf(buf, PAGE_SIZE, "%u\n", sbwcount); | ||||
| 	return sysfs_emit(buf, "%u\n", sbwcount); | ||||
| } | ||||
| 
 | ||||
| static ssize_t | ||||
| @ -751,7 +751,7 @@ nilfs_superblock_sb_update_frequency_show(struct nilfs_superblock_attr *attr, | ||||
| 	sb_update_freq = nilfs->ns_sb_update_freq; | ||||
| 	up_read(&nilfs->ns_sem); | ||||
| 
 | ||||
| 	return snprintf(buf, PAGE_SIZE, "%u\n", sb_update_freq); | ||||
| 	return sysfs_emit(buf, "%u\n", sb_update_freq); | ||||
| } | ||||
| 
 | ||||
| static ssize_t | ||||
| @ -799,7 +799,7 @@ static ssize_t | ||||
| nilfs_superblock_README_show(struct nilfs_superblock_attr *attr, | ||||
| 				struct the_nilfs *nilfs, char *buf) | ||||
| { | ||||
| 	return snprintf(buf, PAGE_SIZE, sb_readme_str); | ||||
| 	return sysfs_emit(buf, sb_readme_str); | ||||
| } | ||||
| 
 | ||||
| NILFS_SUPERBLOCK_RO_ATTR(sb_write_time); | ||||
| @ -834,7 +834,7 @@ ssize_t nilfs_dev_revision_show(struct nilfs_dev_attr *attr, | ||||
| 	u32 major = le32_to_cpu(sbp[0]->s_rev_level); | ||||
| 	u16 minor = le16_to_cpu(sbp[0]->s_minor_rev_level); | ||||
| 
 | ||||
| 	return snprintf(buf, PAGE_SIZE, "%d.%d\n", major, minor); | ||||
| 	return sysfs_emit(buf, "%d.%d\n", major, minor); | ||||
| } | ||||
| 
 | ||||
| static | ||||
| @ -842,7 +842,7 @@ ssize_t nilfs_dev_blocksize_show(struct nilfs_dev_attr *attr, | ||||
| 				 struct the_nilfs *nilfs, | ||||
| 				 char *buf) | ||||
| { | ||||
| 	return snprintf(buf, PAGE_SIZE, "%u\n", nilfs->ns_blocksize); | ||||
| 	return sysfs_emit(buf, "%u\n", nilfs->ns_blocksize); | ||||
| } | ||||
| 
 | ||||
| static | ||||
| @ -853,7 +853,7 @@ ssize_t nilfs_dev_device_size_show(struct nilfs_dev_attr *attr, | ||||
| 	struct nilfs_super_block **sbp = nilfs->ns_sbp; | ||||
| 	u64 dev_size = le64_to_cpu(sbp[0]->s_dev_size); | ||||
| 
 | ||||
| 	return snprintf(buf, PAGE_SIZE, "%llu\n", dev_size); | ||||
| 	return sysfs_emit(buf, "%llu\n", dev_size); | ||||
| } | ||||
| 
 | ||||
| static | ||||
| @ -864,7 +864,7 @@ ssize_t nilfs_dev_free_blocks_show(struct nilfs_dev_attr *attr, | ||||
| 	sector_t free_blocks = 0; | ||||
| 
 | ||||
| 	nilfs_count_free_blocks(nilfs, &free_blocks); | ||||
| 	return snprintf(buf, PAGE_SIZE, "%llu\n", | ||||
| 	return sysfs_emit(buf, "%llu\n", | ||||
| 			(unsigned long long)free_blocks); | ||||
| } | ||||
| 
 | ||||
| @ -875,7 +875,7 @@ ssize_t nilfs_dev_uuid_show(struct nilfs_dev_attr *attr, | ||||
| { | ||||
| 	struct nilfs_super_block **sbp = nilfs->ns_sbp; | ||||
| 
 | ||||
| 	return snprintf(buf, PAGE_SIZE, "%pUb\n", sbp[0]->s_uuid); | ||||
| 	return sysfs_emit(buf, "%pUb\n", sbp[0]->s_uuid); | ||||
| } | ||||
| 
 | ||||
| static | ||||
| @ -903,7 +903,7 @@ static ssize_t nilfs_dev_README_show(struct nilfs_dev_attr *attr, | ||||
| 				     struct the_nilfs *nilfs, | ||||
| 				     char *buf) | ||||
| { | ||||
| 	return snprintf(buf, PAGE_SIZE, dev_readme_str); | ||||
| 	return sysfs_emit(buf, dev_readme_str); | ||||
| } | ||||
| 
 | ||||
| NILFS_DEV_RO_ATTR(revision); | ||||
| @ -1047,7 +1047,7 @@ void nilfs_sysfs_delete_device_group(struct the_nilfs *nilfs) | ||||
| static ssize_t nilfs_feature_revision_show(struct kobject *kobj, | ||||
| 					    struct attribute *attr, char *buf) | ||||
| { | ||||
| 	return snprintf(buf, PAGE_SIZE, "%d.%d\n", | ||||
| 	return sysfs_emit(buf, "%d.%d\n", | ||||
| 			NILFS_CURRENT_REV, NILFS_MINOR_REV); | ||||
| } | ||||
| 
 | ||||
| @ -1060,7 +1060,7 @@ static ssize_t nilfs_feature_README_show(struct kobject *kobj, | ||||
| 					 struct attribute *attr, | ||||
| 					 char *buf) | ||||
| { | ||||
| 	return snprintf(buf, PAGE_SIZE, features_readme_str); | ||||
| 	return sysfs_emit(buf, features_readme_str); | ||||
| } | ||||
| 
 | ||||
| NILFS_FEATURE_RO_ATTR(revision); | ||||
|  | ||||
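The nilfs2 sysfs hunks above convert show() callbacks from snprintf(buf, PAGE_SIZE, ...) to sysfs_emit(), which knows the buffer is a full sysfs page and checks the bound internally. A minimal sketch of the resulting pattern, assuming a hypothetical "example_value" device attribute that is not part of this patch:

#include <linux/device.h>
#include <linux/sysfs.h>

/*
 * sysfs_emit() takes no size argument: it asserts that @buf is the
 * page-sized, page-aligned buffer sysfs hands to ->show() callbacks.
 */
static ssize_t example_value_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", 42);
}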
| @ -1,6 +1,6 @@ | ||||
| /* SPDX-License-Identifier: GPL-2.0+ */ | ||||
| /*
 | ||||
|  * sysfs.h - sysfs support declarations. | ||||
|  * Sysfs support declarations. | ||||
|  * | ||||
|  * Copyright (C) 2005-2014 Nippon Telegraph and Telephone Corporation. | ||||
|  * Copyright (C) 2014 HGST, Inc., a Western Digital Company. | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| // SPDX-License-Identifier: GPL-2.0+
 | ||||
| /*
 | ||||
|  * the_nilfs.c - the_nilfs shared structure. | ||||
|  * the_nilfs shared structure. | ||||
|  * | ||||
|  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| /* SPDX-License-Identifier: GPL-2.0+ */ | ||||
| /*
 | ||||
|  * the_nilfs.h - the_nilfs shared structure. | ||||
|  * the_nilfs shared structure. | ||||
|  * | ||||
|  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | ||||
|  * | ||||
|  | ||||
| @ -1982,19 +1982,21 @@ static int pid_revalidate(struct dentry *dentry, unsigned int flags) | ||||
| { | ||||
| 	struct inode *inode; | ||||
| 	struct task_struct *task; | ||||
| 	int ret = 0; | ||||
| 
 | ||||
| 	if (flags & LOOKUP_RCU) | ||||
| 		return -ECHILD; | ||||
| 
 | ||||
| 	inode = d_inode(dentry); | ||||
| 	task = get_proc_task(inode); | ||||
| 	rcu_read_lock(); | ||||
| 	inode = d_inode_rcu(dentry); | ||||
| 	if (!inode) | ||||
| 		goto out; | ||||
| 	task = pid_task(proc_pid(inode), PIDTYPE_PID); | ||||
| 
 | ||||
| 	if (task) { | ||||
| 		pid_update_inode(task, inode); | ||||
| 		put_task_struct(task); | ||||
| 		return 1; | ||||
| 		ret = 1; | ||||
| 	} | ||||
| 	return 0; | ||||
| out: | ||||
| 	rcu_read_unlock(); | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| static inline bool proc_inode_is_dead(struct inode *inode) | ||||
| @ -3802,7 +3804,10 @@ static int proc_task_readdir(struct file *file, struct dir_context *ctx) | ||||
| 	     task = next_tid(task), ctx->pos++) { | ||||
| 		char name[10 + 1]; | ||||
| 		unsigned int len; | ||||
| 
 | ||||
| 		tid = task_pid_nr_ns(task, ns); | ||||
| 		if (!tid) | ||||
| 			continue;	/* The task has just exited. */ | ||||
| 		len = snprintf(name, sizeof(name), "%u", tid); | ||||
| 		if (!proc_fill_cache(file, ctx, name, len, | ||||
| 				proc_task_instantiate, task, NULL)) { | ||||
|  | ||||
 fs/proc/vmcore.c | 113
							| @ -62,46 +62,75 @@ core_param(novmcoredd, vmcoredd_disabled, bool, 0); | ||||
| /* Device Dump Size */ | ||||
| static size_t vmcoredd_orig_sz; | ||||
| 
 | ||||
| /*
 | ||||
|  * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error | ||||
|  * The called function has to take care of module refcounting. | ||||
|  */ | ||||
| static int (*oldmem_pfn_is_ram)(unsigned long pfn); | ||||
| static DECLARE_RWSEM(vmcore_cb_rwsem); | ||||
| /* List of registered vmcore callbacks. */ | ||||
| static LIST_HEAD(vmcore_cb_list); | ||||
| /* Whether we had a surprise unregistration of a callback. */ | ||||
| static bool vmcore_cb_unstable; | ||||
| /* Whether the vmcore has been opened once. */ | ||||
| static bool vmcore_opened; | ||||
| 
 | ||||
| int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn)) | ||||
| void register_vmcore_cb(struct vmcore_cb *cb) | ||||
| { | ||||
| 	if (oldmem_pfn_is_ram) | ||||
| 		return -EBUSY; | ||||
| 	oldmem_pfn_is_ram = fn; | ||||
| 	return 0; | ||||
| } | ||||
| EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram); | ||||
| 
 | ||||
| void unregister_oldmem_pfn_is_ram(void) | ||||
| { | ||||
| 	oldmem_pfn_is_ram = NULL; | ||||
| 	wmb(); | ||||
| } | ||||
| EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram); | ||||
| 
 | ||||
| static int pfn_is_ram(unsigned long pfn) | ||||
| { | ||||
| 	int (*fn)(unsigned long pfn); | ||||
| 	/* pfn is ram unless fn() checks pagetype */ | ||||
| 	int ret = 1; | ||||
| 
 | ||||
| 	down_write(&vmcore_cb_rwsem); | ||||
| 	INIT_LIST_HEAD(&cb->next); | ||||
| 	list_add_tail(&cb->next, &vmcore_cb_list); | ||||
| 	/*
 | ||||
| 	 * Ask hypervisor if the pfn is really ram. | ||||
| 	 * A ballooned page contains no data and reading from such a page | ||||
| 	 * will cause high load in the hypervisor. | ||||
| 	 * Registering a vmcore callback after the vmcore was opened is | ||||
| 	 * very unusual (e.g., manual driver loading). | ||||
| 	 */ | ||||
| 	fn = oldmem_pfn_is_ram; | ||||
| 	if (fn) | ||||
| 		ret = fn(pfn); | ||||
| 	if (vmcore_opened) | ||||
| 		pr_warn_once("Unexpected vmcore callback registration\n"); | ||||
| 	up_write(&vmcore_cb_rwsem); | ||||
| } | ||||
| EXPORT_SYMBOL_GPL(register_vmcore_cb); | ||||
| 
 | ||||
| void unregister_vmcore_cb(struct vmcore_cb *cb) | ||||
| { | ||||
| 	down_write(&vmcore_cb_rwsem); | ||||
| 	list_del(&cb->next); | ||||
| 	/*
 | ||||
| 	 * Unregistering a vmcore callback after the vmcore was opened is | ||||
| 	 * very unusual (e.g., forced driver removal), but we cannot stop | ||||
| 	 * unregistering. | ||||
| 	 */ | ||||
| 	if (vmcore_opened) { | ||||
| 		pr_warn_once("Unexpected vmcore callback unregistration\n"); | ||||
| 		vmcore_cb_unstable = true; | ||||
| 	} | ||||
| 	up_write(&vmcore_cb_rwsem); | ||||
| } | ||||
| EXPORT_SYMBOL_GPL(unregister_vmcore_cb); | ||||
| 
 | ||||
| static bool pfn_is_ram(unsigned long pfn) | ||||
| { | ||||
| 	struct vmcore_cb *cb; | ||||
| 	bool ret = true; | ||||
| 
 | ||||
| 	lockdep_assert_held_read(&vmcore_cb_rwsem); | ||||
| 	if (unlikely(vmcore_cb_unstable)) | ||||
| 		return false; | ||||
| 
 | ||||
| 	list_for_each_entry(cb, &vmcore_cb_list, next) { | ||||
| 		if (unlikely(!cb->pfn_is_ram)) | ||||
| 			continue; | ||||
| 		ret = cb->pfn_is_ram(cb, pfn); | ||||
| 		if (!ret) | ||||
| 			break; | ||||
| 	} | ||||
| 
 | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| static int open_vmcore(struct inode *inode, struct file *file) | ||||
| { | ||||
| 	down_read(&vmcore_cb_rwsem); | ||||
| 	vmcore_opened = true; | ||||
| 	up_read(&vmcore_cb_rwsem); | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| /* Reads a page from the oldmem device from given offset. */ | ||||
| ssize_t read_from_oldmem(char *buf, size_t count, | ||||
| 			 u64 *ppos, int userbuf, | ||||
| @ -117,6 +146,7 @@ ssize_t read_from_oldmem(char *buf, size_t count, | ||||
| 	offset = (unsigned long)(*ppos % PAGE_SIZE); | ||||
| 	pfn = (unsigned long)(*ppos / PAGE_SIZE); | ||||
| 
 | ||||
| 	down_read(&vmcore_cb_rwsem); | ||||
| 	do { | ||||
| 		if (count > (PAGE_SIZE - offset)) | ||||
| 			nr_bytes = PAGE_SIZE - offset; | ||||
| @ -124,7 +154,7 @@ ssize_t read_from_oldmem(char *buf, size_t count, | ||||
| 			nr_bytes = count; | ||||
| 
 | ||||
| 		/* If pfn is not ram, return zeros for sparse dump files */ | ||||
| 		if (pfn_is_ram(pfn) == 0) | ||||
| 		if (!pfn_is_ram(pfn)) | ||||
| 			memset(buf, 0, nr_bytes); | ||||
| 		else { | ||||
| 			if (encrypted) | ||||
| @ -136,8 +166,10 @@ ssize_t read_from_oldmem(char *buf, size_t count, | ||||
| 				tmp = copy_oldmem_page(pfn, buf, nr_bytes, | ||||
| 						       offset, userbuf); | ||||
| 
 | ||||
| 			if (tmp < 0) | ||||
| 			if (tmp < 0) { | ||||
| 				up_read(&vmcore_cb_rwsem); | ||||
| 				return tmp; | ||||
| 			} | ||||
| 		} | ||||
| 		*ppos += nr_bytes; | ||||
| 		count -= nr_bytes; | ||||
| @ -147,6 +179,7 @@ ssize_t read_from_oldmem(char *buf, size_t count, | ||||
| 		offset = 0; | ||||
| 	} while (count); | ||||
| 
 | ||||
| 	up_read(&vmcore_cb_rwsem); | ||||
| 	return read; | ||||
| } | ||||
| 
 | ||||
| @ -537,14 +570,19 @@ static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma, | ||||
| 			    unsigned long from, unsigned long pfn, | ||||
| 			    unsigned long size, pgprot_t prot) | ||||
| { | ||||
| 	int ret; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Check if oldmem_pfn_is_ram was registered to avoid | ||||
| 	 * looping over all pages without a reason. | ||||
| 	 */ | ||||
| 	if (oldmem_pfn_is_ram) | ||||
| 		return remap_oldmem_pfn_checked(vma, from, pfn, size, prot); | ||||
| 	down_read(&vmcore_cb_rwsem); | ||||
| 	if (!list_empty(&vmcore_cb_list) || vmcore_cb_unstable) | ||||
| 		ret = remap_oldmem_pfn_checked(vma, from, pfn, size, prot); | ||||
| 	else | ||||
| 		return remap_oldmem_pfn_range(vma, from, pfn, size, prot); | ||||
| 		ret = remap_oldmem_pfn_range(vma, from, pfn, size, prot); | ||||
| 	up_read(&vmcore_cb_rwsem); | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| static int mmap_vmcore(struct file *file, struct vm_area_struct *vma) | ||||
| @ -668,6 +706,7 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma) | ||||
| #endif | ||||
| 
 | ||||
| static const struct proc_ops vmcore_proc_ops = { | ||||
| 	.proc_open	= open_vmcore, | ||||
| 	.proc_read	= read_vmcore, | ||||
| 	.proc_lseek	= default_llseek, | ||||
| 	.proc_mmap	= mmap_vmcore, | ||||
|  | ||||
| @ -204,17 +204,20 @@ static int ramfs_parse_param(struct fs_context *fc, struct fs_parameter *param) | ||||
| 	int opt; | ||||
| 
 | ||||
| 	opt = fs_parse(fc, ramfs_fs_parameters, param, &result); | ||||
| 	if (opt < 0) { | ||||
| 	if (opt == -ENOPARAM) { | ||||
| 		opt = vfs_parse_fs_param_source(fc, param); | ||||
| 		if (opt != -ENOPARAM) | ||||
| 			return opt; | ||||
| 		/*
 | ||||
| 		 * We might like to report bad mount options here; | ||||
| 		 * but traditionally ramfs has ignored all mount options, | ||||
| 		 * and as it is used as a !CONFIG_SHMEM simple substitute | ||||
| 		 * for tmpfs, better continue to ignore other mount options. | ||||
| 		 */ | ||||
| 		if (opt == -ENOPARAM) | ||||
| 			opt = 0; | ||||
| 		return opt; | ||||
| 		return 0; | ||||
| 	} | ||||
| 	if (opt < 0) | ||||
| 		return opt; | ||||
| 
 | ||||
| 	switch (opt) { | ||||
| 	case Opt_mode: | ||||
|  | ||||
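For reference, a hedged sketch of the same fs_context parsing pattern for a hypothetical "examplefs": fs_parse() returns -ENOPARAM for unknown keys, vfs_parse_fs_param_source() claims the "source" (device) parameter, and anything else is silently ignored, as ramfs now does. All examplefs_* names are illustrative.

#include <linux/fs_context.h>
#include <linux/fs_parser.h>

enum { Opt_example_mode };

static const struct fs_parameter_spec examplefs_fs_parameters[] = {
	fsparam_u32oct("mode", Opt_example_mode),
	{}
};

static int examplefs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, examplefs_fs_parameters, param, &result);
	if (opt == -ENOPARAM) {
		/* Let the VFS consume "source"; ignore other unknown options. */
		opt = vfs_parse_fs_param_source(fc, param);
		if (opt != -ENOPARAM)
			return opt;
		return 0;
	}
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_example_mode:
		/* apply result.uint_32 to the fs context here */
		break;
	}
	return 0;
}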
| @ -383,22 +383,6 @@ void seq_escape_mem(struct seq_file *m, const char *src, size_t len, | ||||
| } | ||||
| EXPORT_SYMBOL(seq_escape_mem); | ||||
| 
 | ||||
| /**
 | ||||
|  *	seq_escape -	print string into buffer, escaping some characters | ||||
|  *	@m:	target buffer | ||||
|  *	@s:	string | ||||
|  *	@esc:	set of characters that need escaping | ||||
|  * | ||||
|  *	Puts string into buffer, replacing each occurrence of character from | ||||
|  *	@esc with usual octal escape. | ||||
|  *	Use seq_has_overflowed() to check for errors. | ||||
|  */ | ||||
| void seq_escape(struct seq_file *m, const char *s, const char *esc) | ||||
| { | ||||
| 	seq_escape_str(m, s, ESCAPE_OCTAL, esc); | ||||
| } | ||||
| EXPORT_SYMBOL(seq_escape); | ||||
| 
 | ||||
| void seq_vprintf(struct seq_file *m, const char *f, va_list args) | ||||
| { | ||||
| 	int len; | ||||
|  | ||||
| @ -474,10 +474,8 @@ static int v7_fill_super(struct super_block *sb, void *data, int silent) | ||||
| 	struct sysv_sb_info *sbi; | ||||
| 	struct buffer_head *bh; | ||||
| 
 | ||||
| 	if (440 != sizeof (struct v7_super_block)) | ||||
| 		panic("V7 FS: bad super-block size"); | ||||
| 	if (64 != sizeof (struct sysv_inode)) | ||||
| 		panic("sysv fs: bad i-node size"); | ||||
| 	BUILD_BUG_ON(sizeof(struct v7_super_block) != 440); | ||||
| 	BUILD_BUG_ON(sizeof(struct sysv_inode) != 64); | ||||
| 
 | ||||
| 	sbi = kzalloc(sizeof(struct sysv_sb_info), GFP_KERNEL); | ||||
| 	if (!sbi) | ||||
|  | ||||
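The sysv change above turns the historical runtime size checks into BUILD_BUG_ON(), so a violated on-disk layout assumption now fails the build rather than panicking at mount time. A small illustrative sketch; struct example_ondisk is made up:

#include <linux/build_bug.h>
#include <linux/init.h>
#include <linux/types.h>

struct example_ondisk {
	__le32	magic;
	__le32	flags;
	u8	pad[56];		/* total size must stay 64 bytes */
};

static int __init example_init(void)
{
	/* Evaluated at compile time; the build breaks if the layout drifts. */
	BUILD_BUG_ON(sizeof(struct example_ondisk) != 64);
	return 0;
}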
| @ -64,22 +64,6 @@ extern __visible const void __nosave_begin, __nosave_end; | ||||
| #define dereference_kernel_function_descriptor(p) ((void *)(p)) | ||||
| #endif | ||||
| 
 | ||||
| /* random extra sections (if any).  Override
 | ||||
|  * in asm/sections.h */ | ||||
| #ifndef arch_is_kernel_text | ||||
| static inline int arch_is_kernel_text(unsigned long addr) | ||||
| { | ||||
| 	return 0; | ||||
| } | ||||
| #endif | ||||
| 
 | ||||
| #ifndef arch_is_kernel_data | ||||
| static inline int arch_is_kernel_data(unsigned long addr) | ||||
| { | ||||
| 	return 0; | ||||
| } | ||||
| #endif | ||||
| 
 | ||||
| /**
 | ||||
|  * memory_contains - checks if an object is contained within a memory region | ||||
|  * @begin: virtual address of the beginning of the memory region | ||||
| @ -144,6 +128,22 @@ static inline bool init_section_intersects(void *virt, size_t size) | ||||
| 	return memory_intersects(__init_begin, __init_end, virt, size); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * is_kernel_core_data - checks if the pointer address is located in the | ||||
|  *			 .data section | ||||
|  * | ||||
|  * @addr: address to check | ||||
|  * | ||||
|  * Returns: true if the address is located in .data, false otherwise. | ||||
 * Note: on some architectures it may return true for core RODATA and | ||||
 *       false on others, but it will always return true for core RW data. | ||||
|  */ | ||||
| static inline bool is_kernel_core_data(unsigned long addr) | ||||
| { | ||||
| 	return addr >= (unsigned long)_sdata && | ||||
| 	       addr < (unsigned long)_edata; | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * is_kernel_rodata - checks if the pointer address is located in the | ||||
|  *                    .rodata section | ||||
| @ -158,4 +158,47 @@ static inline bool is_kernel_rodata(unsigned long addr) | ||||
| 	       addr < (unsigned long)__end_rodata; | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * is_kernel_inittext - checks if the pointer address is located in the | ||||
|  *                      .init.text section | ||||
|  * | ||||
|  * @addr: address to check | ||||
|  * | ||||
|  * Returns: true if the address is located in .init.text, false otherwise. | ||||
|  */ | ||||
| static inline bool is_kernel_inittext(unsigned long addr) | ||||
| { | ||||
| 	return addr >= (unsigned long)_sinittext && | ||||
| 	       addr < (unsigned long)_einittext; | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * __is_kernel_text - checks if the pointer address is located in the | ||||
|  *                    .text section | ||||
|  * | ||||
|  * @addr: address to check | ||||
|  * | ||||
|  * Returns: true if the address is located in .text, false otherwise. | ||||
|  * Note: an internal helper, only check the range of _stext to _etext. | ||||
|  */ | ||||
| static inline bool __is_kernel_text(unsigned long addr) | ||||
| { | ||||
| 	return addr >= (unsigned long)_stext && | ||||
| 	       addr < (unsigned long)_etext; | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * __is_kernel - checks if the pointer address is located in the kernel range | ||||
|  * | ||||
|  * @addr: address to check | ||||
|  * | ||||
|  * Returns: true if the address is located in the kernel range, false otherwise. | ||||
|  * Note: an internal helper, only check the range of _stext to _end. | ||||
|  */ | ||||
| static inline bool __is_kernel(unsigned long addr) | ||||
| { | ||||
| 	return addr >= (unsigned long)_stext && | ||||
| 	       addr < (unsigned long)_end; | ||||
| } | ||||
| 
 | ||||
| #endif /* _ASM_GENERIC_SECTIONS_H_ */ | ||||
|  | ||||
| @ -11,11 +11,20 @@ | ||||
| 
 | ||||
| #include <kunit/assert.h> | ||||
| #include <kunit/try-catch.h> | ||||
| #include <linux/kernel.h> | ||||
| 
 | ||||
| #include <linux/container_of.h> | ||||
| #include <linux/err.h> | ||||
| #include <linux/init.h> | ||||
| #include <linux/kconfig.h> | ||||
| #include <linux/kref.h> | ||||
| #include <linux/list.h> | ||||
| #include <linux/module.h> | ||||
| #include <linux/slab.h> | ||||
| #include <linux/spinlock.h> | ||||
| #include <linux/string.h> | ||||
| #include <linux/types.h> | ||||
| #include <linux/kref.h> | ||||
| 
 | ||||
| #include <asm/rwonce.h> | ||||
| 
 | ||||
| struct kunit_resource; | ||||
| 
 | ||||
|  | ||||
| @ -2,6 +2,7 @@ | ||||
| #ifndef _LINUX_BH_H | ||||
| #define _LINUX_BH_H | ||||
| 
 | ||||
| #include <linux/instruction_pointer.h> | ||||
| #include <linux/preempt.h> | ||||
| 
 | ||||
| #if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_TRACE_IRQFLAGS) | ||||
|  | ||||
 include/linux/container_of.h | 40 (new file)
							| @ -0,0 +1,40 @@ | ||||
| /* SPDX-License-Identifier: GPL-2.0 */ | ||||
| #ifndef _LINUX_CONTAINER_OF_H | ||||
| #define _LINUX_CONTAINER_OF_H | ||||
| 
 | ||||
| #include <linux/build_bug.h> | ||||
| #include <linux/err.h> | ||||
| 
 | ||||
| #define typeof_member(T, m)	typeof(((T*)0)->m) | ||||
| 
 | ||||
| /**
 | ||||
|  * container_of - cast a member of a structure out to the containing structure | ||||
|  * @ptr:	the pointer to the member. | ||||
|  * @type:	the type of the container struct this is embedded in. | ||||
|  * @member:	the name of the member within the struct. | ||||
|  * | ||||
|  */ | ||||
| #define container_of(ptr, type, member) ({				\ | ||||
| 	void *__mptr = (void *)(ptr);					\ | ||||
| 	static_assert(__same_type(*(ptr), ((type *)0)->member) ||	\ | ||||
| 		      __same_type(*(ptr), void),			\ | ||||
| 		      "pointer type mismatch in container_of()");	\ | ||||
| 	((type *)(__mptr - offsetof(type, member))); }) | ||||
| 
 | ||||
| /**
 | ||||
|  * container_of_safe - cast a member of a structure out to the containing structure | ||||
|  * @ptr:	the pointer to the member. | ||||
|  * @type:	the type of the container struct this is embedded in. | ||||
|  * @member:	the name of the member within the struct. | ||||
|  * | ||||
|  * If IS_ERR_OR_NULL(ptr), ptr is returned unchanged. | ||||
|  */ | ||||
| #define container_of_safe(ptr, type, member) ({				\ | ||||
| 	void *__mptr = (void *)(ptr);					\ | ||||
| 	static_assert(__same_type(*(ptr), ((type *)0)->member) ||	\ | ||||
| 		      __same_type(*(ptr), void),			\ | ||||
| 		      "pointer type mismatch in container_of_safe()");	\ | ||||
| 	IS_ERR_OR_NULL(__mptr) ? ERR_CAST(__mptr) :			\ | ||||
| 		((type *)(__mptr - offsetof(type, member))); }) | ||||
| 
 | ||||
| #endif	/* _LINUX_CONTAINER_OF_H */ | ||||
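container_of() and container_of_safe() move here essentially unchanged from linux/kernel.h, with the type check now expressed via static_assert(). A short usage sketch with an illustrative structure:

#include <linux/container_of.h>
#include <linux/list.h>

struct example_item {
	int			value;
	struct list_head	node;	/* member embedded in the container */
};

static int example_value_of(struct list_head *node)
{
	/* Recover the enclosing structure from a pointer to its member. */
	struct example_item *item = container_of(node, struct example_item, node);

	return item->value;
}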
| @ -8,8 +8,6 @@ | ||||
| #include <linux/pgtable.h> | ||||
| #include <uapi/linux/vmcore.h> | ||||
| 
 | ||||
| #include <linux/pgtable.h> /* for pgprot_t */ | ||||
| 
 | ||||
| /* For IS_ENABLED(CONFIG_CRASH_DUMP) */ | ||||
| #define ELFCORE_ADDR_MAX	(-1ULL) | ||||
| #define ELFCORE_ADDR_ERR	(-2ULL) | ||||
| @ -91,12 +89,32 @@ static inline void vmcore_unusable(void) | ||||
| 		elfcorehdr_addr = ELFCORE_ADDR_ERR; | ||||
| } | ||||
| 
 | ||||
| #define HAVE_OLDMEM_PFN_IS_RAM 1 | ||||
| extern int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn)); | ||||
| extern void unregister_oldmem_pfn_is_ram(void); | ||||
| /**
 | ||||
|  * struct vmcore_cb - driver callbacks for /proc/vmcore handling | ||||
|  * @pfn_is_ram: check whether a PFN really is RAM and should be accessed when | ||||
|  *              reading the vmcore. Will return "true" if it is RAM or if the | ||||
|  *              callback cannot tell. If any callback returns "false", it's not | ||||
|  *              RAM and the page must not be accessed; zeroes should be | ||||
|  *              indicated in the vmcore instead. For example, a ballooned page | ||||
|  *              contains no data and reading from such a page will cause high | ||||
|  *              load in the hypervisor. | ||||
|  * @next: List head to manage registered callbacks internally; initialized by | ||||
|  *        register_vmcore_cb(). | ||||
|  * | ||||
|  * vmcore callbacks allow drivers managing physical memory ranges to | ||||
|  * coordinate with vmcore handling code, for example, to prevent accessing | ||||
|  * physical memory ranges that should not be accessed when reading the vmcore, | ||||
|  * although included in the vmcore header as memory ranges to dump. | ||||
|  */ | ||||
| struct vmcore_cb { | ||||
| 	bool (*pfn_is_ram)(struct vmcore_cb *cb, unsigned long pfn); | ||||
| 	struct list_head next; | ||||
| }; | ||||
| extern void register_vmcore_cb(struct vmcore_cb *cb); | ||||
| extern void unregister_vmcore_cb(struct vmcore_cb *cb); | ||||
| 
 | ||||
| #else /* !CONFIG_CRASH_DUMP */ | ||||
| static inline bool is_kdump_kernel(void) { return 0; } | ||||
| static inline bool is_kdump_kernel(void) { return false; } | ||||
| #endif /* CONFIG_CRASH_DUMP */ | ||||
| 
 | ||||
| /* Device Dump information to be filled by drivers */ | ||||
|  | ||||
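A hedged sketch of how a driver might use the callback interface declared above; example_pfn_is_backed() and the example_* hooks are hypothetical stand-ins (virtio-mem is the in-tree user added by this series):

#include <linux/crash_dump.h>

/* Hypothetical driver helper: does this PFN currently hold readable data? */
static bool example_pfn_is_backed(unsigned long pfn);

static bool example_vmcore_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
{
	/* Returning false makes /proc/vmcore expose zeroes for this page. */
	return example_pfn_is_backed(pfn);
}

static struct vmcore_cb example_vmcore_cb = {
	.pfn_is_ram = example_vmcore_pfn_is_ram,
};

static int example_probe(void)
{
	if (is_kdump_kernel())
		register_vmcore_cb(&example_vmcore_cb);
	return 0;
}

static void example_remove(void)
{
	if (is_kdump_kernel())
		unregister_vmcore_cb(&example_vmcore_cb);
}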
| @ -19,7 +19,7 @@ | ||||
|  *   https://lists.openwall.net/linux-kernel/2011/01/09/56
 | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/kernel.h> | ||||
| #include <linux/math.h> | ||||
| 
 | ||||
| extern unsigned long loops_per_jiffy; | ||||
| 
 | ||||
|  | ||||
| @ -3193,6 +3193,7 @@ static inline void remove_inode_hash(struct inode *inode) | ||||
| } | ||||
| 
 | ||||
| extern void inode_sb_list_add(struct inode *inode); | ||||
| extern void inode_add_lru(struct inode *inode); | ||||
| 
 | ||||
| extern int sb_set_blocksize(struct super_block *, int); | ||||
| extern int sb_min_blocksize(struct super_block *, int); | ||||
|  | ||||
| @ -38,8 +38,9 @@ | ||||
| 
 | ||||
| #include <asm/page.h> | ||||
| #include <linux/bug.h> | ||||
| #include <linux/kernel.h> | ||||
| #include <linux/log2.h> | ||||
| #include <linux/math.h> | ||||
| #include <linux/types.h> | ||||
| 
 | ||||
| struct genradix_root; | ||||
| 
 | ||||
|  | ||||
| @ -477,8 +477,7 @@ static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode) | ||||
| extern const struct file_operations hugetlbfs_file_operations; | ||||
| extern const struct vm_operations_struct hugetlb_vm_ops; | ||||
| struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct, | ||||
| 				struct ucounts **ucounts, int creat_flags, | ||||
| 				int page_size_log); | ||||
| 				int creat_flags, int page_size_log); | ||||
| 
 | ||||
| static inline bool is_file_hugepages(struct file *file) | ||||
| { | ||||
| @ -497,8 +496,7 @@ static inline struct hstate *hstate_inode(struct inode *i) | ||||
| #define is_file_hugepages(file)			false | ||||
| static inline struct file * | ||||
| hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag, | ||||
| 		struct ucounts **ucounts, int creat_flags, | ||||
| 		int page_size_log) | ||||
| 		int creat_flags, int page_size_log) | ||||
| { | ||||
| 	return ERR_PTR(-ENOSYS); | ||||
| } | ||||
|  | ||||
 include/linux/instruction_pointer.h | 8 (new file)
							| @ -0,0 +1,8 @@ | ||||
| /* SPDX-License-Identifier: GPL-2.0 */ | ||||
| #ifndef _LINUX_INSTRUCTION_POINTER_H | ||||
| #define _LINUX_INSTRUCTION_POINTER_H | ||||
| 
 | ||||
| #define _RET_IP_		(unsigned long)__builtin_return_address(0) | ||||
| #define _THIS_IP_  ({ __label__ __here; __here: (unsigned long)&&__here; }) | ||||
| 
 | ||||
| #endif /* _LINUX_INSTRUCTION_POINTER_H */ | ||||
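_THIS_IP_ and _RET_IP_ are split out of linux/kernel.h so that light-weight headers (linux/bh.h below) can use them without pulling in all of kernel.h. A small illustrative sketch of typical usage; example_trace_caller() is made up:

#include <linux/instruction_pointer.h>
#include <linux/printk.h>

static void example_trace_caller(void)
{
	/* _RET_IP_ is the caller's address, _THIS_IP_ the current one. */
	pr_debug("called from %pS (here: %pS)\n",
		 (void *)_RET_IP_, (void *)_THIS_IP_);
}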
| @ -24,25 +24,16 @@ | ||||
| struct cred; | ||||
| struct module; | ||||
| 
 | ||||
| static inline int is_kernel_inittext(unsigned long addr) | ||||
| { | ||||
| 	if (addr >= (unsigned long)_sinittext | ||||
| 	    && addr <= (unsigned long)_einittext) | ||||
| 		return 1; | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| static inline int is_kernel_text(unsigned long addr) | ||||
| { | ||||
| 	if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) || | ||||
| 	    arch_is_kernel_text(addr)) | ||||
| 	if (__is_kernel_text(addr)) | ||||
| 		return 1; | ||||
| 	return in_gate_area_no_mm(addr); | ||||
| } | ||||
| 
 | ||||
| static inline int is_kernel(unsigned long addr) | ||||
| { | ||||
| 	if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end) | ||||
| 	if (__is_kernel(addr)) | ||||
| 		return 1; | ||||
| 	return in_gate_area_no_mm(addr); | ||||
| } | ||||
|  | ||||
| @ -9,6 +9,7 @@ | ||||
| #include <linux/stddef.h> | ||||
| #include <linux/types.h> | ||||
| #include <linux/compiler.h> | ||||
| #include <linux/container_of.h> | ||||
| #include <linux/bitops.h> | ||||
| #include <linux/kstrtox.h> | ||||
| #include <linux/log2.h> | ||||
| @ -19,6 +20,7 @@ | ||||
| #include <linux/printk.h> | ||||
| #include <linux/build_bug.h> | ||||
| #include <linux/static_call_types.h> | ||||
| #include <linux/instruction_pointer.h> | ||||
| #include <asm/byteorder.h> | ||||
| 
 | ||||
| #include <uapi/linux/kernel.h> | ||||
| @ -52,11 +54,6 @@ | ||||
| }					\ | ||||
| ) | ||||
| 
 | ||||
| #define typeof_member(T, m)	typeof(((T*)0)->m) | ||||
| 
 | ||||
| #define _RET_IP_		(unsigned long)__builtin_return_address(0) | ||||
| #define _THIS_IP_  ({ __label__ __here; __here: (unsigned long)&&__here; }) | ||||
| 
 | ||||
| /**
 | ||||
|  * upper_32_bits - return bits 32-63 of a number | ||||
|  * @n: the number we're accessing | ||||
| @ -228,8 +225,6 @@ extern bool parse_option_str(const char *str, const char *option); | ||||
| extern char *next_arg(char *args, char **param, char **val); | ||||
| 
 | ||||
| extern int core_kernel_text(unsigned long addr); | ||||
| extern int init_kernel_text(unsigned long addr); | ||||
| extern int core_kernel_data(unsigned long addr); | ||||
| extern int __kernel_text_address(unsigned long addr); | ||||
| extern int kernel_text_address(unsigned long addr); | ||||
| extern int func_ptr_is_kernel_text(void *ptr); | ||||
| @ -483,36 +478,6 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } | ||||
| #define __CONCAT(a, b) a ## b | ||||
| #define CONCATENATE(a, b) __CONCAT(a, b) | ||||
| 
 | ||||
| /**
 | ||||
|  * container_of - cast a member of a structure out to the containing structure | ||||
|  * @ptr:	the pointer to the member. | ||||
|  * @type:	the type of the container struct this is embedded in. | ||||
|  * @member:	the name of the member within the struct. | ||||
|  * | ||||
|  */ | ||||
| #define container_of(ptr, type, member) ({				\ | ||||
| 	void *__mptr = (void *)(ptr);					\ | ||||
| 	BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) &&	\ | ||||
| 			 !__same_type(*(ptr), void),			\ | ||||
| 			 "pointer type mismatch in container_of()");	\ | ||||
| 	((type *)(__mptr - offsetof(type, member))); }) | ||||
| 
 | ||||
| /**
 | ||||
|  * container_of_safe - cast a member of a structure out to the containing structure | ||||
|  * @ptr:	the pointer to the member. | ||||
|  * @type:	the type of the container struct this is embedded in. | ||||
|  * @member:	the name of the member within the struct. | ||||
|  * | ||||
|  * If IS_ERR_OR_NULL(ptr), ptr is returned unchanged. | ||||
|  */ | ||||
| #define container_of_safe(ptr, type, member) ({				\ | ||||
| 	void *__mptr = (void *)(ptr);					\ | ||||
| 	BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) &&	\ | ||||
| 			 !__same_type(*(ptr), void),			\ | ||||
| 			 "pointer type mismatch in container_of()");	\ | ||||
| 	IS_ERR_OR_NULL(__mptr) ? ERR_CAST(__mptr) :			\ | ||||
| 		((type *)(__mptr - offsetof(type, member))); }) | ||||
| 
 | ||||
| /* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */ | ||||
| #ifdef CONFIG_FTRACE_MCOUNT_RECORD | ||||
| # define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD | ||||
|  | ||||
| @ -2,11 +2,13 @@ | ||||
| #ifndef _LINUX_LIST_H | ||||
| #define _LINUX_LIST_H | ||||
| 
 | ||||
| #include <linux/container_of.h> | ||||
| #include <linux/types.h> | ||||
| #include <linux/stddef.h> | ||||
| #include <linux/poison.h> | ||||
| #include <linux/const.h> | ||||
| #include <linux/kernel.h> | ||||
| 
 | ||||
| #include <asm/barrier.h> | ||||
| 
 | ||||
| /*
 | ||||
|  * Circular doubly linked list implementation. | ||||
|  | ||||
| @ -49,7 +49,9 @@ | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/atomic.h> | ||||
| #include <linux/kernel.h> | ||||
| #include <linux/container_of.h> | ||||
| #include <linux/stddef.h> | ||||
| #include <linux/types.h> | ||||
| 
 | ||||
| struct llist_head { | ||||
| 	struct llist_node *first; | ||||
|  | ||||
| @ -23,6 +23,56 @@ static inline bool mapping_empty(struct address_space *mapping) | ||||
| 	return xa_empty(&mapping->i_pages); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * mapping_shrinkable - test if page cache state allows inode reclaim | ||||
|  * @mapping: the page cache mapping | ||||
|  * | ||||
 * This checks the mapping's cache state for the purpose of inode | ||||
|  * reclaim and LRU management. | ||||
|  * | ||||
|  * The caller is expected to hold the i_lock, but is not required to | ||||
|  * hold the i_pages lock, which usually protects cache state. That's | ||||
|  * because the i_lock and the list_lru lock that protect the inode and | ||||
|  * its LRU state don't nest inside the irq-safe i_pages lock. | ||||
|  * | ||||
|  * Cache deletions are performed under the i_lock, which ensures that | ||||
|  * when an inode goes empty, it will reliably get queued on the LRU. | ||||
|  * | ||||
|  * Cache additions do not acquire the i_lock and may race with this | ||||
|  * check, in which case we'll report the inode as shrinkable when it | ||||
|  * has cache pages. This is okay: the shrinker also checks the | ||||
|  * refcount and the referenced bit, which will be elevated or set in | ||||
|  * the process of adding new cache pages to an inode. | ||||
|  */ | ||||
| static inline bool mapping_shrinkable(struct address_space *mapping) | ||||
| { | ||||
| 	void *head; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * On highmem systems, there could be lowmem pressure from the | ||||
| 	 * inodes before there is highmem pressure from the page | ||||
| 	 * cache. Make inodes shrinkable regardless of cache state. | ||||
| 	 */ | ||||
| 	if (IS_ENABLED(CONFIG_HIGHMEM)) | ||||
| 		return true; | ||||
| 
 | ||||
| 	/* Cache completely empty? Shrink away. */ | ||||
| 	head = rcu_access_pointer(mapping->i_pages.xa_head); | ||||
| 	if (!head) | ||||
| 		return true; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * The xarray stores single offset-0 entries directly in the | ||||
| 	 * head pointer, which allows non-resident page cache entries | ||||
| 	 * to escape the shadow shrinker's list of xarray nodes. The | ||||
| 	 * inode shrinker needs to pick them up under memory pressure. | ||||
| 	 */ | ||||
| 	if (!xa_is_node(head) && xa_is_value(head)) | ||||
| 		return true; | ||||
| 
 | ||||
| 	return false; | ||||
| } | ||||
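As a rough sketch of the calling convention described in the comment above; the helper name and surrounding logic are illustrative, loosely modelled on the inode LRU path, and not taken verbatim from this patch:

    /* Caller holds inode->i_lock, as the kerneldoc above requires. */
    static void my_inode_lru_add(struct inode *inode)
    {
            lockdep_assert_held(&inode->i_lock);

            /*
             * Only queue the inode for LRU reclaim once its page cache no
             * longer pins it: empty, or reduced to a single shadow entry.
             */
            if (!mapping_shrinkable(&inode->i_data))
                    return;

            /* ... add the inode to the superblock's LRU list ... */
    }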
| 
 | ||||
| /*
 | ||||
|  * Bits in mapping->flags. | ||||
|  */ | ||||
|  | ||||
| @ -73,8 +73,11 @@ | ||||
| #ifndef _LINUX_PLIST_H_ | ||||
| #define _LINUX_PLIST_H_ | ||||
| 
 | ||||
| #include <linux/kernel.h> | ||||
| #include <linux/container_of.h> | ||||
| #include <linux/list.h> | ||||
| #include <linux/types.h> | ||||
| 
 | ||||
| #include <asm/bug.h> | ||||
| 
 | ||||
| struct plist_head { | ||||
| 	struct list_head node_list; | ||||
|  | ||||
| @ -9,8 +9,10 @@ | ||||
| #define _LINUX_RADIX_TREE_H | ||||
| 
 | ||||
| #include <linux/bitops.h> | ||||
| #include <linux/kernel.h> | ||||
| #include <linux/gfp.h> | ||||
| #include <linux/list.h> | ||||
| #include <linux/lockdep.h> | ||||
| #include <linux/math.h> | ||||
| #include <linux/percpu.h> | ||||
| #include <linux/preempt.h> | ||||
| #include <linux/rcupdate.h> | ||||
|  | ||||
| @ -11,7 +11,6 @@ | ||||
| #include <linux/linkage.h> | ||||
| 
 | ||||
| #include <linux/types.h> | ||||
| #include <linux/kernel.h> | ||||
| #include <linux/list.h> | ||||
| #include <linux/spinlock.h> | ||||
| #include <linux/atomic.h> | ||||
|  | ||||
| @ -9,8 +9,17 @@ | ||||
| #ifndef __LINUX_SCALE_BITMAP_H | ||||
| #define __LINUX_SCALE_BITMAP_H | ||||
| 
 | ||||
| #include <linux/kernel.h> | ||||
| #include <linux/atomic.h> | ||||
| #include <linux/bitops.h> | ||||
| #include <linux/cache.h> | ||||
| #include <linux/list.h> | ||||
| #include <linux/log2.h> | ||||
| #include <linux/minmax.h> | ||||
| #include <linux/percpu.h> | ||||
| #include <linux/slab.h> | ||||
| #include <linux/smp.h> | ||||
| #include <linux/types.h> | ||||
| #include <linux/wait.h> | ||||
| 
 | ||||
| struct seq_file; | ||||
| 
 | ||||
|  | ||||
| @ -4,6 +4,7 @@ | ||||
| 
 | ||||
| #include <linux/types.h> | ||||
| #include <linux/string.h> | ||||
| #include <linux/string_helpers.h> | ||||
| #include <linux/bug.h> | ||||
| #include <linux/mutex.h> | ||||
| #include <linux/cpumask.h> | ||||
| @ -135,7 +136,21 @@ static inline void seq_escape_str(struct seq_file *m, const char *src, | ||||
| 	seq_escape_mem(m, src, strlen(src), flags, esc); | ||||
| } | ||||
| 
 | ||||
| void seq_escape(struct seq_file *m, const char *s, const char *esc); | ||||
| /**
 | ||||
|  * seq_escape - print string into buffer, escaping some characters | ||||
|  * @m: target buffer | ||||
|  * @s: NULL-terminated string | ||||
|  * @esc: set of characters that need escaping | ||||
|  * | ||||
|  * Puts the string into the buffer, replacing each occurrence of a | ||||
|  * character from @esc with its octal escape. | ||||
|  * | ||||
|  * Use seq_has_overflowed() to check for errors. | ||||
|  */ | ||||
| static inline void seq_escape(struct seq_file *m, const char *s, const char *esc) | ||||
| { | ||||
| 	seq_escape_str(m, s, ESCAPE_OCTAL, esc); | ||||
| } | ||||
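A minimal usage sketch for the now-inline helper; my_show() and the chosen escape set are illustrative only:

    static int my_show(struct seq_file *m, void *v)
    {
            const char *name = v;

            /* Emit the name with spaces, tabs and backslashes as \040 etc. */
            seq_escape(m, name, " \t\\");
            seq_putc(m, '\n');
            return 0;
    }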
| 
 | ||||
| void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type, | ||||
| 		  int rowsize, int groupsize, const void *buf, size_t len, | ||||
| @ -194,7 +209,7 @@ static const struct file_operations __name ## _fops = {			\ | ||||
| #define DEFINE_PROC_SHOW_ATTRIBUTE(__name)				\ | ||||
| static int __name ## _open(struct inode *inode, struct file *file)	\ | ||||
| {									\ | ||||
| 	return single_open(file, __name ## _show, inode->i_private);	\ | ||||
| 	return single_open(file, __name ## _show, PDE_DATA(inode));	\ | ||||
| }									\ | ||||
| 									\ | ||||
| static const struct proc_ops __name ## _proc_ops = {			\ | ||||
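The change above matters because data registered through proc_create_data() is reached via PDE_DATA(inode), not inode->i_private. A hedged sketch of typical usage of the macro; foo, foo_stats, and foo_stats_instance are invented names:

    struct foo_stats {
            unsigned long count;
    };

    static int foo_show(struct seq_file *m, void *v)
    {
            struct foo_stats *stats = m->private;   /* set by single_open() */

            seq_printf(m, "count: %lu\n", stats->count);
            return 0;
    }
    DEFINE_PROC_SHOW_ATTRIBUTE(foo);

    /* Registration would hand the stats pointer to procfs as the entry data: */
    /* proc_create_data("foo", 0444, NULL, &foo_proc_ops, &foo_stats_instance); */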
|  | ||||
| @ -126,7 +126,6 @@ static inline int sigequalsets(const sigset_t *set1, const sigset_t *set2) | ||||
| #define sigmask(sig)	(1UL << ((sig) - 1)) | ||||
| 
 | ||||
| #ifndef __HAVE_ARCH_SIG_SETOPS | ||||
| #include <linux/string.h> | ||||
| 
 | ||||
| #define _SIG_SET_BINOP(name, op)					\ | ||||
| static inline void name(sigset_t *r, const sigset_t *a, const sigset_t *b) \ | ||||
|  | ||||
| @ -108,7 +108,6 @@ static inline void on_each_cpu_cond(smp_cond_func_t cond_func, | ||||
| #ifdef CONFIG_SMP | ||||
| 
 | ||||
| #include <linux/preempt.h> | ||||
| #include <linux/kernel.h> | ||||
| #include <linux/compiler.h> | ||||
| #include <linux/thread_info.h> | ||||
| #include <asm/smp.h> | ||||
|  | ||||
| @ -57,7 +57,6 @@ | ||||
| #include <linux/compiler.h> | ||||
| #include <linux/irqflags.h> | ||||
| #include <linux/thread_info.h> | ||||
| #include <linux/kernel.h> | ||||
| #include <linux/stringify.h> | ||||
| #include <linux/bottom_half.h> | ||||
| #include <linux/lockdep.h> | ||||
|  | ||||
| @ -25,6 +25,11 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries, | ||||
| unsigned int stack_depot_fetch(depot_stack_handle_t handle, | ||||
| 			       unsigned long **entries); | ||||
| 
 | ||||
| int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size, | ||||
| 		       int spaces); | ||||
| 
 | ||||
| void stack_depot_print(depot_stack_handle_t stack); | ||||
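A hedged sketch of how the two new declarations above could be combined with the existing stack_trace_save()/stack_depot_save() API; both wrapper functions are illustrative, not part of this patch:

    static depot_stack_handle_t save_current_stack(gfp_t gfp)
    {
            unsigned long entries[16];
            unsigned int nr;

            /* Skip this wrapper frame, then stash the trace in the depot. */
            nr = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
            return stack_depot_save(entries, nr, gfp);
    }

    static void report_stack(depot_stack_handle_t handle, char *buf, size_t len)
    {
            /* Dump the saved trace straight to the kernel log... */
            stack_depot_print(handle);
            /* ...or render it into a buffer, indenting each frame by two spaces. */
            stack_depot_snprint(handle, buf, len, 2);
    }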
| 
 | ||||
| #ifdef CONFIG_STACKDEPOT | ||||
| int stack_depot_init(void); | ||||
| #else | ||||
|  | ||||
Some files were not shown because too many files have changed in this diff.