mem.c
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/aio.h>

#include <asm/uaccess.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVPORT_MINOR	4
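
/*
 * Bound an access that starts at @start to the end of its page: return
 * however many of @size bytes fit before the next page boundary.
 */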
static inline unsigned long size_inside_page(unsigned long start,
					     unsigned long size)
{
	unsigned long sz;

	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

	return min(sz, size);
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
#ifdef CONFIG_STRICT_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
			       "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
			       current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif
#endif

#ifdef CONFIG_DEVMEM
void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}

/*
 * This function reads the *physical* memory. The f_pos points directly to
 * the memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (p != *ppos)
		return 0;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		unsigned long remaining;

		sz = size_inside_page(p, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, count))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr)
			return -EFAULT;

		remaining = copy_to_user(buf, ptr, sz);
		unxlate_dev_mem_ptr(p, ptr);
		if (remaining)
			return -EFAULT;

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}
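
/*
 * This function writes to the *physical* memory; it is the mirror image of
 * read_mem() above.
 */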
static ssize_t write_mem(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (p != *ppos)
		return -EFBIG;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		sz = size_inside_page(p, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr) {
			if (written)
				break;
			return -EFAULT;
		}

		copied = copy_from_user(ptr, buf, sz);
		unxlate_dev_mem_ptr(p, ptr);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}
#endif	/* CONFIG_DEVMEM */

#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
int __weak phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top the kernel knows about, or through
	 * a file pointer that was marked O_DSYNC, will be done non-cached.
	 */
	if (file->f_flags & O_DSYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	phys_addr_t offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else

#define get_unmapped_area_mem	NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};
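
/*
 * mmap() on /dev/mem: map the requested physical range straight into the
 * caller's address space with remap_pfn_range(), after the usual validity,
 * permission and cacheability checks.
 */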
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
						&vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}
#endif	/* CONFIG_DEVMEM */

#ifdef CONFIG_DEVKMEM
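/*
 * mmap() on /dev/kmem: translate the kernel-virtual offset to a physical
 * page frame and hand the mapping off to mmap_mem().
 */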
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory than
	 * available in mem_map which pfn_valid checks for. Perhaps should add a
	 * new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}
#endif

#ifdef CONFIG_DEVKMEM
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
	int err = 0;

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long)high_memory - p)
			low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			sz = size_inside_page(p, low_count);
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
#endif
		while (low_count > 0) {
			sz = size_inside_page(p, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			sz = size_inside_page(p, count);
			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			sz = vread(kbuf, (char *)p, sz);
			if (!sz)
				break;
			if (copy_to_user(buf, kbuf, sz)) {
				err = -EFAULT;
				break;
			}
			count -= sz;
			buf += sz;
			read += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read ? read : err;
}
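
/*
 * Helper for write_kmem(): copy user data page by page into directly
 * mapped (low) kernel memory.
 */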
static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;

		sz = size_inside_page(p, count);

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_kmem_ptr((char *)p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
	int err = 0;

	if (p < (unsigned long) high_memory) {
		unsigned long to_write = min_t(unsigned long, count,
					       (unsigned long)high_memory - p);
		wrote = do_write_kmem(p, buf, to_write, ppos);
		if (wrote != to_write)
			return wrote;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			unsigned long sz = size_inside_page(p, count);
			unsigned long n;

			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			n = copy_from_user(kbuf, buf, sz);
			if (n) {
				err = -EFAULT;
				break;
			}
			vwrite(kbuf, (char *)p, sz);
			count -= sz;
			buf += sz;
			virtr += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote ? : err;
}
#endif

#ifdef CONFIG_DEVPORT
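/*
 * /dev/port: byte-wise access to the legacy I/O port space.  The file
 * offset is the port number; reads use inb(), writes use outb().
 */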
static ssize_t read_port(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;

		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
#endif

static ssize_t read_null(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static ssize_t aio_read_null(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	return 0;
}

static ssize_t aio_write_null(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	return iov_length(iov, nr_segs);
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}
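
/*
 * Read from /dev/zero: fill the iov_iter with zeroes, at most one page at a
 * time so pending signals and rescheduling are serviced between chunks.
 */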
static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
	size_t written = 0;

	while (iov_iter_count(iter)) {
		size_t chunk = iov_iter_count(iter), n;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		n = iov_iter_zero(chunk, iter);
		if (!n && iov_iter_count(iter))
			return written ? written : -EFAULT;
		written += n;
		if (signal_pending(current))
			return written ? written : -ERESTARTSYS;
		cond_resched();
	}
	return written;
}
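
/*
 * mmap() on /dev/zero: shared mappings are backed by a shmem object via
 * shmem_zero_setup(); private mappings fall through and behave like
 * ordinary anonymous, zero-filled memory.
 */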
static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}

static ssize_t write_full(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	mutex_lock(&file_inode(file)->i_mutex);
	switch (orig) {
	case SEEK_CUR:
		offset += file->f_pos;
	case SEEK_SET:
		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
		if (IS_ERR_VALUE((unsigned long long)offset)) {
			ret = -EOVERFLOW;
			break;
		}
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&file_inode(file)->i_mutex);
	return ret;
}

#endif

#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
static int open_port(struct inode *inode, struct file *filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}
#endif

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define aio_write_zero	aio_write_null
#define open_mem	open_port
#define open_kmem	open_mem

#ifdef CONFIG_DEVMEM
static const struct file_operations mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
	.get_unmapped_area = get_unmapped_area_mem,
};
#endif

#ifdef CONFIG_DEVKMEM
static const struct file_operations kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
	.get_unmapped_area = get_unmapped_area_mem,
};
#endif

static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.aio_read	= aio_read_null,
	.aio_write	= aio_write_null,
	.splice_write	= splice_write_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};
#endif

static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.read		= new_sync_read,
	.write		= write_zero,
	.read_iter	= read_iter_zero,
	.aio_write	= aio_write_zero,
	.mmap		= mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 * - no writeback happens
 */
static struct backing_dev_info zero_bdi = {
	.name		= "char/mem",
	.capabilities	= BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read		= new_sync_read,
	.read_iter	= read_iter_zero,
	.write		= write_full,
};

static const struct memdev {
	const char *name;
	umode_t mode;
	const struct file_operations *fops;
	struct backing_dev_info *dev_info;
} devlist[] = {
#ifdef CONFIG_DEVMEM
	 [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
#endif
#ifdef CONFIG_DEVKMEM
	 [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
#endif
	 [3] = { "null", 0666, &null_fops, NULL },
#ifdef CONFIG_DEVPORT
	 [4] = { "port", 0, &port_fops, NULL },
#endif
	 [5] = { "zero", 0666, &zero_fops, &zero_bdi },
	 [7] = { "full", 0666, &full_fops, NULL },
	 [8] = { "random", 0666, &random_fops, NULL },
	 [9] = { "urandom", 0666, &urandom_fops, NULL },
#ifdef CONFIG_PRINTK
	[11] = { "kmsg", 0644, &kmsg_fops, NULL },
#endif
};
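
/*
 * Common open() for all minors under the mem major: look up the minor in
 * devlist[], install its file_operations and backing_dev_info, then call
 * the device's own open() if it has one.
 */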
static int memory_open(struct inode *inode, struct file *filp)
{
	int minor;
	const struct memdev *dev;

	minor = iminor(inode);
	if (minor >= ARRAY_SIZE(devlist))
		return -ENXIO;

	dev = &devlist[minor];
	if (!dev->fops)
		return -ENXIO;

	filp->f_op = dev->fops;
	if (dev->dev_info)
		filp->f_mapping->backing_dev_info = dev->dev_info;

	/* Is /dev/mem or /dev/kmem ? */
	if (dev->dev_info == &directly_mappable_cdev_bdi)
		filp->f_mode |= FMODE_UNSIGNED_OFFSET;

	if (dev->fops->open)
		return dev->fops->open(inode, filp);

	return 0;
}

static const struct file_operations memory_fops = {
	.open = memory_open,
	.llseek = noop_llseek,
};
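
/*
 * devtmpfs/udev callback: report the default mode for each memory device
 * node from devlist[]; the node name is left at its default.
 */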
static char *mem_devnode(struct device *dev, umode_t *mode)
{
	if (mode && devlist[MINOR(dev->devt)].mode)
		*mode = devlist[MINOR(dev->devt)].mode;
	return NULL;
}

static struct class *mem_class;
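
/*
 * Boot-time init: register the mem character major, create the "mem" class
 * and a device node for each populated devlist[] entry, then initialise the
 * tty layer.
 */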
static int __init chr_dev_init(void)
{
	int minor;
	int err;

	err = bdi_init(&zero_bdi);
	if (err)
		return err;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	if (IS_ERR(mem_class))
		return PTR_ERR(mem_class);

	mem_class->devnode = mem_devnode;
	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
		if (!devlist[minor].name)
			continue;

		/*
		 * Create /dev/port?
		 */
		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
			continue;

		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
			      NULL, devlist[minor].name);
	}

	return tty_init();
}

fs_initcall(chr_dev_init);