mem.c

/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/uaccess.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVPORT_MINOR	4

static inline unsigned long size_inside_page(unsigned long start,
					     unsigned long size)
{
	unsigned long sz;

	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

	return min(sz, size);
}
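/*
 * Illustrative example (editorial, not part of the driver): with
 * PAGE_SIZE == 4096, size_inside_page(4000, 500) returns 96, because only
 * 96 bytes remain before the next page boundary (4096 - (4000 & 4095)).
 * The copy loops below rely on this to process at most one page per
 * iteration.
 */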
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
static inline int page_is_allowed(unsigned long pfn)
{
	return devmem_is_allowed(pfn);
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn))
			return 0;
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int page_is_allowed(unsigned long pfn)
{
	return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif
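/*
 * Editorial note: with CONFIG_STRICT_DEVMEM the architecture's
 * devmem_is_allowed() decides access per page frame.  On some architectures
 * it can return 2 rather than 1, meaning "access permitted, but reads must
 * see zeroes"; read_mem() below checks for that value explicitly.
 */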
#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif
/*
 * This function reads the *physical* memory. The f_pos points directly to
 * the memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t read, sz;
	void *ptr;

	if (p != *ppos)
		return 0;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		unsigned long remaining;
		int allowed;

		sz = size_inside_page(p, count);

		allowed = page_is_allowed(p >> PAGE_SHIFT);
		if (!allowed)
			return -EPERM;
		if (allowed == 2) {
			/* Show zeros for restricted memory. */
			remaining = clear_user(buf, sz);
		} else {
			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur.
			 */
			ptr = xlate_dev_mem_ptr(p);
			if (!ptr)
				return -EFAULT;

			remaining = copy_to_user(buf, ptr, sz);

			unxlate_dev_mem_ptr(p, ptr);
		}

		if (remaining)
			return -EFAULT;

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}
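/*
 * Illustrative userspace sketch (hypothetical; assumes CAP_SYS_RAWIO and a
 * kernel that exposes the target range through /dev/mem):
 *
 *	int fd = open("/dev/mem", O_RDONLY);
 *	unsigned char buf[16];
 *	ssize_t n = pread(fd, buf, sizeof(buf), 0xf0000);
 *
 * The file offset is the physical address, so pread() at 0xf0000 reads the
 * start of the legacy BIOS region on a PC-class machine.
 */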
static ssize_t write_mem(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (p != *ppos)
		return -EFBIG;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		int allowed;

		sz = size_inside_page(p, count);

		allowed = page_is_allowed(p >> PAGE_SHIFT);
		if (!allowed)
			return -EPERM;

		/* Skip actual writing when a page is marked as restricted. */
		if (allowed == 1) {
			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur.
			 */
			ptr = xlate_dev_mem_ptr(p);
			if (!ptr) {
				if (written)
					break;
				return -EFAULT;
			}

			copied = copy_from_user(ptr, buf, sz);
			unxlate_dev_mem_ptr(p, ptr);
			if (copied) {
				written += sz - copied;
				if (written)
					break;
				return -EFAULT;
			}
		}

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}
int __weak phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top the kernel knows about or through
	 * a file pointer that was marked O_DSYNC will be done non-cached.
	 */
	if (file->f_flags & O_DSYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
#endif
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	phys_addr_t offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif
#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT |
		NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

	/* It's illegal to wrap around the end of the physical address space. */
	if (offset + (phys_addr_t)size - 1 < offset)
		return -EINVAL;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
					  &vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}
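/*
 * Illustrative userspace sketch (hypothetical): mapping a page of physical
 * memory or MMIO through /dev/mem.  The mmap offset is the physical address
 * and must be page-aligned.
 *
 *	int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *	volatile uint8_t *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, fd, 0xfed00000);
 *
 * 0xfed00000 is only an example address (the HPET block on many x86
 * systems); whether a range is mappable at all depends on
 * CONFIG_STRICT_DEVMEM and range_is_allowed() above.
 */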
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory than
	 * available in mem_map which pfn_valid checks for. Perhaps should add a
	 * new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
	int err = 0;

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long)high_memory - p)
			low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			sz = size_inside_page(p, low_count);
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
#endif
		while (low_count > 0) {
			sz = size_inside_page(p, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((void *)p);
			if (!virt_addr_valid(kbuf))
				return -ENXIO;

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			sz = size_inside_page(p, count);
			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			sz = vread(kbuf, (char *)p, sz);
			if (!sz)
				break;
			if (copy_to_user(buf, kbuf, sz)) {
				err = -EFAULT;
				break;
			}
			count -= sz;
			buf += sz;
			read += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read ? read : err;
}
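/*
 * Editorial note: read_kmem() is two-phase.  Addresses below high_memory sit
 * in the kernel's direct mapping and are copied straight to userspace;
 * vmalloc/module addresses cannot be handed to copy_to_user() directly, so
 * they are staged through a bounce page filled by vread().
 */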
static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		void *ptr;

		sz = size_inside_page(p, count);

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_kmem_ptr((void *)p);
		if (!virt_addr_valid(ptr))
			return -ENXIO;

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}
/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
	int err = 0;

	if (p < (unsigned long) high_memory) {
		unsigned long to_write = min_t(unsigned long, count,
					       (unsigned long)high_memory - p);
		wrote = do_write_kmem(p, buf, to_write, ppos);
		if (wrote != to_write)
			return wrote;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			unsigned long sz = size_inside_page(p, count);
			unsigned long n;

			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			n = copy_from_user(kbuf, buf, sz);
			if (n) {
				err = -EFAULT;
				break;
			}
			vwrite(kbuf, (char *)p, sz);
			count -= sz;
			buf += sz;
			virtr += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	/* GNU "?:": return the byte count if it is non-zero, else the error */
	return virtr + wrote ? : err;
}
static ssize_t read_port(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;

		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
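/*
 * Illustrative userspace sketch (hypothetical): /dev/port maps the file
 * offset to an x86 I/O port number, one byte per port.  Reading port 0x61
 * (the PC speaker/NMI status port on classic PCs) might look like:
 *
 *	int fd = open("/dev/port", O_RDONLY);
 *	unsigned char val;
 *	pread(fd, &val, 1, 0x61);
 *
 * open_port() below requires CAP_SYS_RAWIO, so this needs root (or that
 * capability) to succeed.
 */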
static ssize_t read_null(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
	return 0;
}

static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
	size_t count = iov_iter_count(from);

	iov_iter_advance(from, count);
	return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}
static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
	size_t written = 0;

	while (iov_iter_count(iter)) {
		size_t chunk = iov_iter_count(iter), n;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		n = iov_iter_zero(chunk, iter);
		if (!n && iov_iter_count(iter))
			return written ? written : -EFAULT;
		written += n;
		if (signal_pending(current))
			return written ? written : -ERESTARTSYS;
		cond_resched();
	}
	return written;
}
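/*
 * Editorial note: zeroing is done at most one page per iteration so that a
 * large read of /dev/zero stays responsive -- signal_pending() and
 * cond_resched() get a chance to run between chunks.  For example, a 1 MiB
 * read takes 256 iterations with 4 KiB pages.
 */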
static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}

static unsigned long get_unmapped_area_zero(struct file *file,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags)
{
#ifdef CONFIG_MMU
	if (flags & MAP_SHARED) {
		/*
		 * mmap_zero() will call shmem_zero_setup() to create a file,
		 * so use shmem's get_unmapped_area in case it can be huge;
		 * and pass NULL for file as in mmap.c's get_unmapped_area(),
		 * so as not to confuse shmem with our handle on "/dev/zero".
		 */
		return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
	}

	/* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
#else
	return -ENOSYS;
#endif
}

static ssize_t write_full(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}
/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}
/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	inode_lock(file_inode(file));
	switch (orig) {
	case SEEK_CUR:
		offset += file->f_pos;
		/* fall through: the adjusted offset is handled as SEEK_SET */
	case SEEK_SET:
		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
		if ((unsigned long long)offset >= -MAX_ERRNO) {
			ret = -EOVERFLOW;
			break;
		}
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	inode_unlock(file_inode(file));
	return ret;
}
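/*
 * Illustrative userspace sketch (hypothetical): positioning /dev/mem at a
 * physical address before reading it.
 *
 *	int fd = open("/dev/mem", O_RDONLY);
 *	lseek(fd, 0x1000, SEEK_SET);	// f_pos is the physical address
 *	read(fd, buf, 64);
 *
 * Offsets in the top MAX_ERRNO values of the range are rejected with
 * -EOVERFLOW so that a huge f_pos cannot be mistaken for an errno by
 * userland.
 */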
static int open_port(struct inode *inode, struct file *filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define write_iter_zero	write_iter_null
#define open_mem	open_port
#define open_kmem	open_mem
static const struct file_operations __maybe_unused mem_fops = {
	.llseek = memory_lseek,
	.read = read_mem,
	.write = write_mem,
	.mmap = mmap_mem,
	.open = open_mem,
#ifndef CONFIG_MMU
	.get_unmapped_area = get_unmapped_area_mem,
	.mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations __maybe_unused kmem_fops = {
	.llseek = memory_lseek,
	.read = read_kmem,
	.write = write_kmem,
	.mmap = mmap_kmem,
	.open = open_kmem,
#ifndef CONFIG_MMU
	.get_unmapped_area = get_unmapped_area_mem,
	.mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations null_fops = {
	.llseek = null_lseek,
	.read = read_null,
	.write = write_null,
	.read_iter = read_iter_null,
	.write_iter = write_iter_null,
	.splice_write = splice_write_null,
};

static const struct file_operations __maybe_unused port_fops = {
	.llseek = memory_lseek,
	.read = read_port,
	.write = write_port,
	.open = open_port,
};

static const struct file_operations zero_fops = {
	.llseek = zero_lseek,
	.write = write_zero,
	.read_iter = read_iter_zero,
	.write_iter = write_iter_zero,
	.mmap = mmap_zero,
	.get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
	.mmap_capabilities = zero_mmap_capabilities,
#endif
};

static const struct file_operations full_fops = {
	.llseek = full_lseek,
	.read_iter = read_iter_zero,
	.write = write_full,
};
static const struct memdev {
	const char *name;
	umode_t mode;
	const struct file_operations *fops;
	fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
	 [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
#ifdef CONFIG_DEVKMEM
	 [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
#endif
	 [3] = { "null", 0666, &null_fops, 0 },
#ifdef CONFIG_DEVPORT
	 [4] = { "port", 0, &port_fops, 0 },
#endif
	 [5] = { "zero", 0666, &zero_fops, 0 },
	 [7] = { "full", 0666, &full_fops, 0 },
	 [8] = { "random", 0666, &random_fops, 0 },
	 [9] = { "urandom", 0666, &urandom_fops, 0 },
#ifdef CONFIG_PRINTK
	[11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
};
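/*
 * Editorial note: the array index is the minor number under MEM_MAJOR (1),
 * so the table above yields the classic device nodes: /dev/mem is char 1:1,
 * /dev/null 1:3, /dev/port 1:4, /dev/zero 1:5, /dev/full 1:7, /dev/random
 * 1:8, /dev/urandom 1:9 and /dev/kmsg 1:11.  The gaps are historical minors
 * no longer provided here.
 */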
static int memory_open(struct inode *inode, struct file *filp)
{
	int minor;
	const struct memdev *dev;

	minor = iminor(inode);
	if (minor >= ARRAY_SIZE(devlist))
		return -ENXIO;

	dev = &devlist[minor];
	if (!dev->fops)
		return -ENXIO;

	filp->f_op = dev->fops;
	filp->f_mode |= dev->fmode;

	if (dev->fops->open)
		return dev->fops->open(inode, filp);

	return 0;
}

static const struct file_operations memory_fops = {
	.open = memory_open,
	.llseek = noop_llseek,
};

static char *mem_devnode(struct device *dev, umode_t *mode)
{
	if (mode && devlist[MINOR(dev->devt)].mode)
		*mode = devlist[MINOR(dev->devt)].mode;
	return NULL;
}

static struct class *mem_class;

static int __init chr_dev_init(void)
{
	int minor;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	if (IS_ERR(mem_class))
		return PTR_ERR(mem_class);

	mem_class->devnode = mem_devnode;
	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
		if (!devlist[minor].name)
			continue;

		/*
		 * Create /dev/port?
		 */
		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
			continue;

		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
			      NULL, devlist[minor].name);
	}

	return tty_init();
}

fs_initcall(chr_dev_init);
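/*
 * Editorial note: fs_initcall() registers chr_dev_init() at the "fs" initcall
 * level, which runs before ordinary device_initcall()-level drivers, so the
 * memory devices (and, via the tty_init() call above, the core tty devices)
 * are available early in boot.
 */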