// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/file_table.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/task_work.h>
#include <linux/swap.h>
#include <linux/kmemleak.h>

#include <linux/atomic.h>

#include "internal.h"

/* sysctl tunables... */
static struct files_stat_struct files_stat = {
	.max_files = NR_FILE
};

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __ro_after_init;
static struct kmem_cache *bfilp_cachep __ro_after_init;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

/* Container for backing file with optional user path */
struct backing_file {
	struct file file;
	union {
		struct path user_path;
		freeptr_t bf_freeptr;
	};
};

#define backing_file(f) container_of(f, struct backing_file, file)

const struct path *backing_file_user_path(const struct file *f)
{
	return &backing_file(f)->user_path;
}
EXPORT_SYMBOL_GPL(backing_file_user_path);

void backing_file_set_user_path(struct file *f, const struct path *path)
{
	backing_file(f)->user_path = *path;
}
EXPORT_SYMBOL_GPL(backing_file_set_user_path);
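
/*
 * Hedged usage sketch (not from this file): a stacking filesystem such
 * as overlayfs obtains one of these containers via backing_file_open()
 * (fs/open.c in current trees), so that f_path points at the real
 * (backing) file while backing_file_user_path() returns the path the
 * user actually opened. Variable names below are illustrative:
 *
 *	file = backing_file_open(&user_path, flags, &real_path, cred);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 */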

static inline void file_free(struct file *f)
{
	security_file_free(f);
	if (likely(!(f->f_mode & FMODE_NOACCOUNT)))
		percpu_counter_dec(&nr_files);
	put_cred(f->f_cred);
	if (unlikely(f->f_mode & FMODE_BACKING)) {
		path_put(backing_file_user_path(f));
		kmem_cache_free(bfilp_cachep, backing_file(f));
	} else {
		kmem_cache_free(filp_cachep, f);
	}
}

/*
 * Return the total number of open files in the system
 */
static long get_nr_files(void)
{
	return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
unsigned long get_max_files(void)
{
	return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)

/*
 * Handle nr_files sysctl
 */
static int proc_nr_files(const struct ctl_table *table, int write, void *buffer,
			 size_t *lenp, loff_t *ppos)
{
	files_stat.nr_files = percpu_counter_sum_positive(&nr_files);
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}

static const struct ctl_table fs_stat_sysctls[] = {
	{
		.procname = "file-nr",
		.data = &files_stat,
		.maxlen = sizeof(files_stat),
		.mode = 0444,
		.proc_handler = proc_nr_files,
	},
	{
		.procname = "file-max",
		.data = &files_stat.max_files,
		.maxlen = sizeof(files_stat.max_files),
		.mode = 0644,
		.proc_handler = proc_doulongvec_minmax,
		.extra1 = SYSCTL_LONG_ZERO,
		.extra2 = SYSCTL_LONG_MAX,
	},
	{
		.procname = "nr_open",
		.data = &sysctl_nr_open,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_douintvec_minmax,
		.extra1 = &sysctl_nr_open_min,
		.extra2 = &sysctl_nr_open_max,
	},
};

static int __init init_fs_stat_sysctls(void)
{
	register_sysctl_init("fs", fs_stat_sysctls);
	if (IS_ENABLED(CONFIG_BINFMT_MISC)) {
		struct ctl_table_header *hdr;

		hdr = register_sysctl_mount_point("fs/binfmt_misc");
		kmemleak_not_leak(hdr);
	}
	return 0;
}
fs_initcall(init_fs_stat_sysctls);
#endif
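
/*
 * Userspace view of the tables above (illustrative output; the exact
 * numbers vary per system):
 *
 *	$ cat /proc/sys/fs/file-nr
 *	4704	0	9223372036854775807
 *
 * The three fields of file-nr are the number of allocated file
 * structures, the count of allocated-but-unused ones (kept for
 * compatibility; it reads 0 on modern kernels since files are freed
 * immediately), and file-max.
 */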

static int init_file(struct file *f, int flags, const struct cred *cred)
{
	int error;

	f->f_cred = get_cred(cred);
	error = security_file_alloc(f);
	if (unlikely(error)) {
		put_cred(f->f_cred);
		return error;
	}

	spin_lock_init(&f->f_lock);
	/*
	 * Note that f_pos_lock is only used for files raising
	 * FMODE_ATOMIC_POS and directories. Other files such as pipes
	 * don't need it and, since f_pos_lock is in a union, may reuse
	 * the space for other purposes. They are expected to initialize
	 * the respective member when opening the file.
	 */
	mutex_init(&f->f_pos_lock);
	memset(&f->__f_path, 0, sizeof(f->f_path));
	memset(&f->f_ra, 0, sizeof(f->f_ra));

	f->f_flags = flags;
	f->f_mode = OPEN_FMODE(flags);

	f->f_op = NULL;
	f->f_mapping = NULL;
	f->private_data = NULL;
	f->f_inode = NULL;
	f->f_owner = NULL;
#ifdef CONFIG_EPOLL
	f->f_ep = NULL;
#endif

	f->f_iocb_flags = 0;
	f->f_pos = 0;
	f->f_wb_err = 0;
	f->f_sb_err = 0;

	/*
	 * We're SLAB_TYPESAFE_BY_RCU so initialize f_count last. While
	 * fget-rcu pattern users need to be able to handle spurious
	 * refcount bumps, we should reinitialize the reused file first.
	 */
	file_ref_init(&f->f_ref, 1);
	/*
	 * Disable permission and pre-content events for all files by default.
	 * They may be enabled later by fsnotify_open_perm_and_set_mode().
	 */
	file_set_fsnotify_mode(f, FMODE_NONOTIFY_PERM);
	return 0;
}
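
/*
 * Abridged sketch of the fget-rcu pattern the comment above refers to
 * (the real lookup lives in fs/file.c; names are simplified here):
 *
 *	rcu_read_lock();
 *	file = rcu_dereference(fdt->fd[fd]);
 *	if (file && !file_ref_get(&file->f_ref))
 *		file = NULL;	// raced with the final fput()
 *	if (file && rcu_dereference(fdt->fd[fd]) != file) {
 *		// SLAB_TYPESAFE_BY_RCU: the slot changed under us, so
 *		// the reference we took may be a spurious bump on a
 *		// recycled file - drop it and retry the lookup
 *		fput(file);
 *	}
 *	rcu_read_unlock();
 */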

/* Find an unused file structure and return a pointer to it.
 * Returns an error pointer if some error happened, e.g. we are over the
 * file structures limit, we ran out of memory or the operation is not
 * permitted.
 *
 * Be very careful using this. You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write. If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *alloc_empty_file(int flags, const struct cred *cred)
{
	static long old_max;
	struct file *f;
	int error;

	/*
	 * Privileged users can go above max_files
	 */
	if (unlikely(get_nr_files() >= files_stat.max_files) &&
	    !capable(CAP_SYS_ADMIN)) {
		/*
		 * percpu_counters are inaccurate. Do an expensive check before
		 * we go and fail.
		 */
		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
			goto over;
	}

	f = kmem_cache_alloc(filp_cachep, GFP_KERNEL);
	if (unlikely(!f))
		return ERR_PTR(-ENOMEM);

	error = init_file(f, flags, cred);
	if (unlikely(error)) {
		kmem_cache_free(filp_cachep, f);
		return ERR_PTR(error);
	}

	percpu_counter_inc(&nr_files);

	return f;

over:
	/* Ran out of filps - report that */
	if (get_nr_files() > old_max) {
		pr_info("VFS: file-max limit %lu reached\n", get_max_files());
		old_max = get_nr_files();
	}
	return ERR_PTR(-ENFILE);
}

/*
 * Variant of alloc_empty_file() that doesn't check and modify nr_files.
 *
 * This is only for kernel internal use, and the allocated file must not be
 * installed into file tables or such.
 */
struct file *alloc_empty_file_noaccount(int flags, const struct cred *cred)
{
	struct file *f;
	int error;

	f = kmem_cache_alloc(filp_cachep, GFP_KERNEL);
	if (unlikely(!f))
		return ERR_PTR(-ENOMEM);

	error = init_file(f, flags, cred);
	if (unlikely(error)) {
		kmem_cache_free(filp_cachep, f);
		return ERR_PTR(error);
	}

	f->f_mode |= FMODE_NOACCOUNT;

	return f;
}

/*
 * Variant of alloc_empty_file() that allocates a backing_file container
 * and doesn't check and modify nr_files.
 *
 * This is only for kernel internal use, and the allocated file must not be
 * installed into file tables or such.
 */
struct file *alloc_empty_backing_file(int flags, const struct cred *cred)
{
	struct backing_file *ff;
	int error;

	ff = kmem_cache_alloc(bfilp_cachep, GFP_KERNEL);
	if (unlikely(!ff))
		return ERR_PTR(-ENOMEM);

	error = init_file(&ff->file, flags, cred);
	if (unlikely(error)) {
		kmem_cache_free(bfilp_cachep, ff);
		return ERR_PTR(error);
	}

	ff->file.f_mode |= FMODE_BACKING | FMODE_NOACCOUNT;
	return &ff->file;
}

/**
 * file_init_path - initialize a 'struct file' based on path
 *
 * @file: the file to set up
 * @path: the (dentry, vfsmount) pair for the new file
 * @fop: the 'struct file_operations' for the new file
 */
static void file_init_path(struct file *file, const struct path *path,
			   const struct file_operations *fop)
{
	file->__f_path = *path;
	file->f_inode = path->dentry->d_inode;
	file->f_mapping = path->dentry->d_inode->i_mapping;
	file->f_wb_err = filemap_sample_wb_err(file->f_mapping);
	file->f_sb_err = file_sample_sb_err(file);
	if (fop->llseek)
		file->f_mode |= FMODE_LSEEK;
	if ((file->f_mode & FMODE_READ) &&
	    likely(fop->read || fop->read_iter))
		file->f_mode |= FMODE_CAN_READ;
	if ((file->f_mode & FMODE_WRITE) &&
	    likely(fop->write || fop->write_iter))
		file->f_mode |= FMODE_CAN_WRITE;
	file->f_iocb_flags = iocb_flags(file);
	file->f_mode |= FMODE_OPENED;
	file->f_op = fop;
	if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_inc(path->dentry->d_inode);
}

/**
 * alloc_file - allocate and initialize a 'struct file'
 *
 * @path: the (dentry, vfsmount) pair for the new file
 * @flags: O_... flags with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 */
static struct file *alloc_file(const struct path *path, int flags,
			       const struct file_operations *fop)
{
	struct file *file;

	file = alloc_empty_file(flags, current_cred());
	if (!IS_ERR(file))
		file_init_path(file, path, fop);
	return file;
}

static inline int alloc_path_pseudo(const char *name, struct inode *inode,
				    struct vfsmount *mnt, struct path *path)
{
	path->dentry = d_alloc_pseudo(mnt->mnt_sb, &QSTR(name));
	if (!path->dentry)
		return -ENOMEM;
	path->mnt = mntget(mnt);
	d_instantiate(path->dentry, inode);
	return 0;
}

struct file *alloc_file_pseudo(struct inode *inode, struct vfsmount *mnt,
			       const char *name, int flags,
			       const struct file_operations *fops)
{
	int ret;
	struct path path;
	struct file *file;

	ret = alloc_path_pseudo(name, inode, mnt, &path);
	if (ret)
		return ERR_PTR(ret);

	file = alloc_file(&path, flags, fops);
	if (IS_ERR(file)) {
		ihold(inode);
		path_put(&path);
		return file;
	}
	/*
	 * Disable all fsnotify events for pseudo files by default.
	 * They may be enabled by caller with file_set_fsnotify_mode().
	 */
	file_set_fsnotify_mode(file, FMODE_NONOTIFY);
	return file;
}
EXPORT_SYMBOL(alloc_file_pseudo);
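
/*
 * Hedged usage sketch (modeled on in-tree callers such as the anon
 * inode and pipe code; example_mnt, example_fops and priv are
 * illustrative names, not real symbols):
 *
 *	file = alloc_file_pseudo(inode, example_mnt, "[example]",
 *				 O_RDWR, &example_fops);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	file->private_data = priv;
 *
 * On failure the caller still owns its inode reference (note the
 * ihold() above compensating for the path_put()); on success the new
 * file holds it via the instantiated dentry.
 */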

struct file *alloc_file_pseudo_noaccount(struct inode *inode,
					 struct vfsmount *mnt, const char *name,
					 int flags,
					 const struct file_operations *fops)
{
	int ret;
	struct path path;
	struct file *file;

	ret = alloc_path_pseudo(name, inode, mnt, &path);
	if (ret)
		return ERR_PTR(ret);

	file = alloc_empty_file_noaccount(flags, current_cred());
	if (IS_ERR(file)) {
		ihold(inode);
		path_put(&path);
		return file;
	}
	file_init_path(file, &path, fops);
	/*
	 * Disable all fsnotify events for pseudo files by default.
	 * They may be enabled by caller with file_set_fsnotify_mode().
	 */
	file_set_fsnotify_mode(file, FMODE_NONOTIFY);
	return file;
}
EXPORT_SYMBOL_GPL(alloc_file_pseudo_noaccount);

struct file *alloc_file_clone(struct file *base, int flags,
			      const struct file_operations *fops)
{
	struct file *f;

	f = alloc_file(&base->f_path, flags, fops);
	if (!IS_ERR(f)) {
		path_get(&f->f_path);
		f->f_mapping = base->f_mapping;
	}
	return f;
}

/*
 * The real guts of fput() - releasing the last reference to file.
 */
static void __fput(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct vfsmount *mnt = file->f_path.mnt;
	struct inode *inode = file->f_inode;
	fmode_t mode = file->f_mode;

	if (unlikely(!(file->f_mode & FMODE_OPENED)))
		goto out;

	might_sleep();

	fsnotify_close(file);
	/*
	 * The function eventpoll_release() should be the first called
	 * in the file cleanup chain.
	 */
	eventpoll_release(file);
	locks_remove_file(file);

	security_file_release(file);
	if (unlikely(file->f_flags & FASYNC)) {
		if (file->f_op->fasync)
			file->f_op->fasync(-1, file, 0);
	}
	if (file->f_op->release)
		file->f_op->release(inode, file);
	if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
		     !(mode & FMODE_PATH))) {
		cdev_put(inode->i_cdev);
	}
	fops_put(file->f_op);
	file_f_owner_release(file);
	put_file_access(file);
	dput(dentry);
	if (unlikely(mode & FMODE_NEED_UNMOUNT))
		dissolve_on_fput(mnt);
	mntput(mnt);
out:
	file_free(file);
}

static LLIST_HEAD(delayed_fput_list);
static void delayed_fput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_fput_list);
	struct file *f, *t;

	llist_for_each_entry_safe(f, t, node, f_llist)
		__fput(f);
}

static void ____fput(struct callback_head *work)
{
	__fput(container_of(work, struct file, f_task_work));
}

static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);

/*
 * If a kernel thread really needs to have the final fput() it has done
 * to complete, call this. The only user right now is the boot - we
 * *do* need to make sure our writes to binaries on initramfs have
 * not left us with opened struct file waiting for __fput() - execve()
 * won't work without that. Please, don't add more callers without
 * very good reasons; in particular, never call that with locks
 * held and never call that from a thread that might need to do
 * some work on any kind of umount.
 */
void flush_delayed_fput(void)
{
	delayed_fput(NULL);
	flush_delayed_work(&delayed_fput_work);
}
EXPORT_SYMBOL_GPL(flush_delayed_fput);

static void __fput_deferred(struct file *file)
{
	struct task_struct *task = current;

	if (unlikely(!(file->f_mode & (FMODE_BACKING | FMODE_OPENED)))) {
		file_free(file);
		return;
	}

	if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
		init_task_work(&file->f_task_work, ____fput);
		if (!task_work_add(task, &file->f_task_work, TWA_RESUME))
			return;
		/*
		 * After this task has run exit_task_work(),
		 * task_work_add() will fail. Fall through to delayed
		 * fput to avoid leaking *file.
		 */
	}

	if (llist_add(&file->f_llist, &delayed_fput_list))
		schedule_delayed_work(&delayed_fput_work, 1);
}

void fput(struct file *file)
{
	if (unlikely(file_ref_put(&file->f_ref)))
		__fput_deferred(file);
}
EXPORT_SYMBOL(fput);
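
/*
 * Typical pairing (illustrative sketch): every successful fget() must
 * eventually be balanced by an fput() once the caller is done with the
 * file:
 *
 *	struct file *f = fget(fd);
 *	if (!f)
 *		return -EBADF;
 *	ret = vfs_fsync(f, 0);
 *	fput(f);
 *	return ret;
 */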

/*
 * Synchronous analog of fput(); for kernel threads that might be needed
 * in some umount() (and thus can't use flush_delayed_fput() without
 * risking deadlocks), need to wait for completion of __fput() and know
 * for this specific struct file it won't involve anything that would
 * need them. Use only if you really need it - at the very least,
 * don't blindly convert fput() by kernel thread to that.
 */
void __fput_sync(struct file *file)
{
	if (file_ref_put(&file->f_ref))
		__fput(file);
}
EXPORT_SYMBOL(__fput_sync);

/*
 * Equivalent to __fput_sync(), but optimized for being called with the last
 * reference.
 *
 * See file_ref_put_close() for details.
 */
void fput_close_sync(struct file *file)
{
	if (likely(file_ref_put_close(&file->f_ref)))
		__fput(file);
}

/*
 * Equivalent to fput(), but optimized for being called with the last
 * reference.
 *
 * See file_ref_put_close() for details.
 */
void fput_close(struct file *file)
{
	if (file_ref_put_close(&file->f_ref))
		__fput_deferred(file);
}

void __init files_init(void)
{
	struct kmem_cache_args args = {
		.use_freeptr_offset = true,
		.freeptr_offset = offsetof(struct file, f_freeptr),
	};

	filp_cachep = kmem_cache_create("filp", sizeof(struct file), &args,
				SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU);

	args.freeptr_offset = offsetof(struct backing_file, bf_freeptr);
	bfilp_cachep = kmem_cache_create("bfilp", sizeof(struct backing_file),
				&args, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU);
	percpu_counter_init(&nr_files, 0, GFP_KERNEL);
}

/*
 * One file with associated inode and dcache is very roughly 1K. By default
 * do not use more than 10% of our memory for files.
 */
void __init files_maxfiles_init(void)
{
	unsigned long n;
	unsigned long nr_pages = totalram_pages();
	unsigned long memreserve = (nr_pages - nr_free_pages()) * 3/2;

	memreserve = min(memreserve, nr_pages - 1);
	n = ((nr_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;

	files_stat.max_files = max_t(unsigned long, n, NR_FILE);
}
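
/*
 * Worked example (illustrative numbers): with 4 KiB pages,
 * PAGE_SIZE / 1024 == 4. On a 16 GiB machine (4194304 pages) with a
 * negligible reserve, n comes out to roughly 4194304 * 4 / 10, i.e.
 * about 1.7 million files - well above the NR_FILE floor.
 */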