/*
 * linux/fs/file_table.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/lglock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/task_work.h>
#include <linux/ima.h>
#include <linux/atomic.h>

#include "internal.h"

/* sysctl tunables... */
struct files_stat_struct files_stat = {
	.max_files = NR_FILE
};

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;
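
/*
 * struct file is freed via RCU (file_free_rcu) so that lockless
 * readers - e.g. the RCU-protected fd-table lookups in fs/file.c -
 * never see the memory reused while they are still dereferencing it.
 */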
static void file_free_rcu(struct rcu_head *head)
{
	struct file *f = container_of(head, struct file, f_u.fu_rcuhead);

	put_cred(f->f_cred);
	kmem_cache_free(filp_cachep, f);
}

static inline void file_free(struct file *f)
{
	percpu_counter_dec(&nr_files);
	call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}

/*
 * Return the total number of open files in the system
 */
static long get_nr_files(void)
{
	return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
unsigned long get_max_files(void)
{
	return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	files_stat.nr_files = get_nr_files();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#else
int proc_nr_files(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return -ENOSYS;
}
#endif

/* Find an unused file structure and return a pointer to it.
 * Returns an error pointer if some error happened, e.g. we are over the
 * file structures limit, we ran out of memory or the operation is not
 * permitted.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
	const struct cred *cred = current_cred();
	static long old_max;
	struct file *f;
	int error;

	/*
	 * Privileged users can go above max_files
	 */
	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
		/*
		 * percpu_counters are inaccurate: the cheap read above may
		 * lag the true count by up to batch * nr_cpus.  Do an
		 * expensive but exact summation before we go and fail.
		 */
		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
			goto over;
	}

	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
	if (unlikely(!f))
		return ERR_PTR(-ENOMEM);

	percpu_counter_inc(&nr_files);
	f->f_cred = get_cred(cred);
	error = security_file_alloc(f);
	if (unlikely(error)) {
		file_free(f);
		return ERR_PTR(error);
	}

	atomic_long_set(&f->f_count, 1);
	rwlock_init(&f->f_owner.lock);
	spin_lock_init(&f->f_lock);
	mutex_init(&f->f_pos_lock);
	eventpoll_init_file(f);
	/* f->f_version: 0 */
	return f;
over:
	/* Ran out of filps - report that */
	if (get_nr_files() > old_max) {
#ifdef FD_OVER_CHECK
		static int fd_dump_all_files;

		/* Dump every task's open fds once, to help find the leak */
		if (!fd_dump_all_files) {
			struct task_struct *p;
			struct files_struct *files;
			pid_t pid;

			fd_dump_all_files = 1;
			rcu_read_lock();	/* for_each_process() needs RCU */
			for_each_process(p) {
				if (p->flags & PF_KTHREAD)
					continue;
				files = p->files;
				if (files) {
					struct fdtable *fdt = files_fdtable(files);

					if (fdt) {
						pid = p->pid;
						pr_err("[FDLEAK]dump FDs for [%d:%s]\n",
						       pid, p->comm);
						fd_show_open_files(pid, files, fdt);
					}
				}
			}
			rcu_read_unlock();
		}
#endif
		pr_info("VFS: file-max limit %lu reached\n", get_max_files());
		old_max = get_nr_files();
	}
	return ERR_PTR(-ENFILE);
}
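
/*
 * Hypothetical usage sketch (not a caller in this file): a filp
 * obtained here that never gets fully installed must be dropped with
 * put_filp(), not fput():
 *
 *	struct file *f = get_empty_filp();
 *	if (IS_ERR(f))
 *		return PTR_ERR(f);
 *	if (setup_failed)		(hypothetical condition)
 *		put_filp(f);
 */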

/**
 * alloc_file - allocate and initialize a 'struct file'
 *
 * @path: the (dentry, vfsmount) pair for the new file
 * @mode: the mode with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 */
struct file *alloc_file(struct path *path, fmode_t mode,
		const struct file_operations *fop)
{
	struct file *file;

	file = get_empty_filp();
	if (IS_ERR(file))
		return file;

	file->f_path = *path;
	file->f_inode = path->dentry->d_inode;
	file->f_mapping = path->dentry->d_inode->i_mapping;
	/*
	 * Cache whether the fops actually provide read/write methods, so
	 * that the VFS can cheaply reject reads/writes the file can never
	 * service.
	 */
	if ((mode & FMODE_READ) &&
	     likely(fop->read || fop->aio_read || fop->read_iter))
		mode |= FMODE_CAN_READ;
	if ((mode & FMODE_WRITE) &&
	     likely(fop->write || fop->aio_write || fop->write_iter))
		mode |= FMODE_CAN_WRITE;
	file->f_mode = mode;
	file->f_op = fop;
	if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_inc(path->dentry->d_inode);	/* balanced in __fput() */
	return file;
}
EXPORT_SYMBOL(alloc_file);
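
/*
 * Hypothetical usage sketch: the caller passes in held (dentry, mnt)
 * references via @path; on success they are owned by the returned file
 * and dropped at __fput() time, e.g.:
 *
 *	struct path path = { .mnt = mntget(mnt), .dentry = dget(dentry) };
 *	struct file *f = alloc_file(&path, FMODE_READ, &my_fops);
 *	if (IS_ERR(f))
 *		path_put(&path);
 *
 * (my_fops is a placeholder for the caller's file_operations.)
 */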

/* the real guts of fput() - releasing the last reference to the file
 */
static void __fput(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct vfsmount *mnt = file->f_path.mnt;
	struct inode *inode = file->f_inode;

	might_sleep();

	fsnotify_close(file);
	/*
	 * The function eventpoll_release() should be the first called
	 * in the file cleanup chain.
	 */
	eventpoll_release(file);
	locks_remove_file(file);

	if (unlikely(file->f_flags & FASYNC)) {
		if (file->f_op->fasync)
			file->f_op->fasync(-1, file, 0);
	}
	ima_file_free(file);
	if (file->f_op->release)
		file->f_op->release(inode, file);
	security_file_free(file);
	if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
		     !(file->f_mode & FMODE_PATH))) {
		cdev_put(inode->i_cdev);
	}
	fops_put(file->f_op);
	put_pid(file->f_owner.pid);
	if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_dec(inode);
	if (file->f_mode & FMODE_WRITER) {
		put_write_access(inode);
		__mnt_drop_write(mnt);
	}
	file->f_path.dentry = NULL;
	file->f_path.mnt = NULL;
	file->f_inode = NULL;
	file_free(file);
	dput(dentry);
	mntput(mnt);
}
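
/*
 * Files whose final fput() happened in a context that cannot run
 * __fput() directly (it may sleep) are queued on this lock-free list
 * and torn down later from workqueue context.
 */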
static LLIST_HEAD(delayed_fput_list);

static void delayed_fput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_fput_list);
	struct llist_node *next;

	for (; node; node = next) {
		next = llist_next(node);
		__fput(llist_entry(node, struct file, f_u.fu_llist));
	}
}

static void ____fput(struct callback_head *work)
{
	__fput(container_of(work, struct file, f_u.fu_rcuhead));
}

/*
 * If a kernel thread really needs the final fput() it has done to
 * complete, call this.  The only user right now is the boot - we
 * *do* need to make sure our writes to binaries on initramfs have
 * not left us with opened struct file waiting for __fput() - execve()
 * won't work without that.  Please, don't add more callers without
 * very good reasons; in particular, never call that with locks
 * held and never call that from a thread that might need to do
 * some work on any kind of umount.
 */
void flush_delayed_fput(void)
{
	delayed_fput(NULL);
}

static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);
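
/*
 * fput() defers the real teardown: in ordinary process context the file
 * is queued as task_work so __fput() runs when the task returns to
 * userspace; from interrupt or kernel-thread context it falls back to
 * delayed_fput_list, drained later from a workqueue.
 */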
void fput(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		struct task_struct *task = current;

		if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
			init_task_work(&file->f_u.fu_rcuhead, ____fput);
			if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
				return;
			/*
			 * After this task has run exit_task_work(),
			 * task_work_add() will fail.  Fall through to delayed
			 * fput to avoid leaking *file.
			 */
		}

		if (llist_add(&file->f_u.fu_llist, &delayed_fput_list))
			schedule_delayed_work(&delayed_fput_work, 1);
	}
}

/*
 * Synchronous analog of fput() for kernel threads that are needed in
 * some umount() (and thus can't use flush_delayed_fput() without
 * risking deadlocks), need to wait for completion of __fput(), and know
 * that for this specific struct file it won't involve anything that
 * would need them.  Use only if you really need it - at the very least,
 * don't blindly convert fput() by kernel thread to that.
 */
void __fput_sync(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		struct task_struct *task = current;

		BUG_ON(!(task->flags & PF_KTHREAD));
		__fput(file);
	}
}

EXPORT_SYMBOL(fput);
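
/*
 * put_filp() drops a filp obtained from get_empty_filp() that was never
 * fully set up: no dentry/mount/f_op references have been taken yet, so
 * the full __fput() teardown is neither needed nor safe.
 */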
void put_filp(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		security_file_free(file);
		file_free(file);
	}
}

void __init files_init(unsigned long mempages)
{
	unsigned long n;

	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	/*
	 * One file with associated inode and dcache is very roughly 1K.
	 * Per default don't use more than 10% of our memory for files.
	 */
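	/*
	 * Example: with 4 GiB of RAM and 4 KiB pages, mempages = 1048576,
	 * so n = (1048576 * 4) / 10 = 419430 files by default.
	 */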
	n = (mempages * (PAGE_SIZE / 1024)) / 10;
	files_stat.max_files = max_t(unsigned long, n, NR_FILE);

	percpu_counter_init(&nr_files, 0, GFP_KERNEL);
}