maccess.c

/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009, 2015
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <asm/ctl_reg.h>
#include <asm/io.h>

static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long aligned, offset, count;
	char tmp[8];

	/* Work on the aligned eight byte word that contains dst. */
	aligned = (unsigned long) dst & ~7UL;
	offset = (unsigned long) dst & 7UL;
	/* Copy at most up to the next eight byte boundary. */
	size = min(8UL - offset, size);
	count = size - 1;
	asm volatile(
		/* Put the address of the mvc template below into r1. */
		"	bras	1,0f\n"
		/* Template for ex: copy size bytes from src to &tmp[offset]. */
		"	mvc	0(1,%4),0(%5)\n"
		/* Read the aligned eight bytes at dst into tmp. */
		"0:	mvc	0(8,%3),0(%0)\n"
		/* Execute the template above with length count = size - 1. */
		"	ex	%1,0(1)\n"
		/* Load the modified eight bytes ... */
		"	lg	%1,0(%3)\n"
		/* ... and store them back via the real address of dst,
		   bypassing DAT and page table write protection. */
		"	lra	%0,0(%0)\n"
		"	sturg	%1,%0\n"
		: "+&a" (aligned), "+&a" (count), "=m" (tmp)
		: "a" (&tmp), "a" (&tmp[offset]), "a" (src)
		: "cc", "memory", "1");
	return size;
}
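
/*
 * Worked example (illustrative, not in the original source): for
 * dst = ...0x1005 and size = 16, aligned = ...0x1000 and offset = 5,
 * so size is clamped to min(8 - 5, 16) = 3 and this call patches bytes
 * 5..7 of that aligned doubleword. The caller's loop below then
 * advances dst and src by 3 and calls again for the remaining bytes.
 */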

/*
 * s390_kernel_write - write to kernel memory bypassing DAT
 * @dst: destination address
 * @src: source address
 * @size: number of bytes to copy
 *
 * This function writes to kernel memory bypassing DAT and possible page table
 * write protection. It writes to the destination using the sturg instruction.
 * Therefore we have a read-modify-write sequence: the function reads eight
 * bytes from the destination at an eight byte boundary, modifies the bytes
 * requested and writes the result back in a loop.
 *
 * Note: this means that this function may not be called concurrently on
 * several CPUs with overlapping words, since this may potentially
 * cause data corruption.
 */
void notrace s390_kernel_write(void *dst, const void *src, size_t size)
{
	long copied;

	while (size) {
		copied = s390_kernel_write_odd(dst, src, size);
		dst += copied;
		src += copied;
		size -= copied;
	}
}
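
/*
 * Usage sketch (illustrative only, not part of the original file):
 * patching an opcode in write-protected kernel text. The function name
 * and calling context are hypothetical.
 */
static void __maybe_unused example_patch_opcode(void *code, u16 opcode)
{
	/* Safe for read-only mappings; see the concurrency note above. */
	s390_kernel_write(code, &opcode, sizeof(opcode));
}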

static int __memcpy_real(void *dest, void *src, size_t count)
{
	register unsigned long _dest asm("2") = (unsigned long) dest;
	register unsigned long _len1 asm("3") = (unsigned long) count;
	register unsigned long _src asm("4") = (unsigned long) src;
	register unsigned long _len2 asm("5") = (unsigned long) count;
	int rc = -EFAULT;

	asm volatile (
		/* mvcle copies in chunks; retry while cc == 3 (not done). */
		"0:	mvcle	%1,%2,0x0\n"
		"1:	jo	0b\n"
		/* Copy completed without a fault: report success. */
		"	lhi	%0,0x0\n"
		"2:\n"
		/* On a fault the fixup continues at 2:, rc stays -EFAULT. */
		EX_TABLE(1b,2b)
		: "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
		  "+d" (_len2), "=m" (*((long *) dest))
		: "m" (*((long *) src))
		: "cc", "memory");
	return rc;
}

/*
 * Copy memory in real mode (kernel to kernel)
 */
int memcpy_real(void *dest, void *src, size_t count)
{
	int irqs_disabled, rc;
	unsigned long flags;

	if (!count)
		return 0;
	/* Clear PSW bits 5-7: disable DAT and I/O/external interrupts. */
	flags = __arch_local_irq_stnsm(0xf8UL);
	irqs_disabled = arch_irqs_disabled_flags(flags);
	if (!irqs_disabled)
		trace_hardirqs_off();
	rc = __memcpy_real(dest, src, count);
	if (!irqs_disabled)
		trace_hardirqs_on();
	__arch_local_irq_ssm(flags);
	return rc;
}
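
/*
 * Usage sketch (illustrative only): reading from a physical address
 * that has no usable virtual mapping, e.g. while inspecting memory of
 * a crashed kernel. The helper name is hypothetical.
 */
static int __maybe_unused example_read_phys(void *buf, unsigned long phys, size_t count)
{
	int rc;

	rc = memcpy_real(buf, (void *) phys, count);
	if (rc)
		pr_warn("copy from %lx failed\n", phys);
	return rc;
}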

/*
 * Copy memory in absolute mode (kernel to kernel)
 */
void memcpy_absolute(void *dest, void *src, size_t count)
{
	unsigned long cr0, flags, prefix;

	flags = arch_local_irq_save();
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);	/* disable lowcore protection */
	prefix = store_prefix();
	if (prefix) {
		/*
		 * Set the prefix to zero so the copy is not redirected by
		 * the prefixing mechanism. Machine checks must be disabled
		 * since the lowcore contents are not usable meanwhile.
		 */
		local_mcck_disable();
		set_prefix(0);
		memcpy(dest, src, count);
		set_prefix(prefix);
		local_mcck_enable();
	} else {
		memcpy(dest, src, count);
	}
	__ctl_load(cr0, 0, 0);
	arch_local_irq_restore(flags);
}
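
/*
 * Usage sketch (illustrative only): storing a value at its absolute
 * address so the store is not redirected by this cpu's prefix register.
 * Kernels of this era wrap the same pattern in a mem_assign_absolute()
 * macro; the helper below is a simplified, hypothetical variant.
 */
static void __maybe_unused example_assign_absolute(unsigned long *dest, unsigned long val)
{
	memcpy_absolute(dest, &val, sizeof(val));
}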

/*
 * Copy memory from kernel (real) to user (virtual)
 */
int copy_to_user_real(void __user *dest, void *src, unsigned long count)
{
	int offs = 0, size, rc;
	char *buf;

	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = -EFAULT;
	while (offs < count) {
		size = min(PAGE_SIZE, count - offs);
		if (memcpy_real(buf, src + offs, size))
			goto out;
		if (copy_to_user(dest + offs, buf, size))
			goto out;
		offs += size;
	}
	rc = 0;
out:
	free_page((unsigned long) buf);
	return rc;
}

/*
 * Check if physical address is within prefix or zero page. These are the
 * areas the hardware prefixing mechanism remaps, so a plain access would
 * not see the absolute memory contents.
 */
static int is_swapped(unsigned long addr)
{
	unsigned long lc;
	int cpu;

	if (addr < sizeof(struct lowcore))
		return 1;
	for_each_online_cpu(cpu) {
		lc = (unsigned long) lowcore_ptr[cpu];
		if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
			continue;
		return 1;
	}
	return 0;
}

/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a copy of
 * the absolute memory. The buffer size is at most one page.
 */
void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	void *bounce = (void *) addr;
	unsigned long size;

	get_online_cpus();
	preempt_disable();
	if (is_swapped(addr)) {
		size = PAGE_SIZE - (addr & ~PAGE_MASK);
		bounce = (void *) __get_free_page(GFP_ATOMIC);
		if (bounce)
			memcpy_absolute(bounce, (void *) addr, size);
	}
	preempt_enable();
	put_online_cpus();
	return bounce;
}

/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(phys_addr_t addr, void *buf)
{
	if ((void *) addr != buf)
		free_page((unsigned long) buf);
}
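
/*
 * Usage sketch (illustrative only): how a /dev/mem style reader brackets
 * an access with the xlate/unxlate pair. Assumes addr and size stay
 * within one page, matching the bounce buffer above; the function name
 * is hypothetical.
 */
static int __maybe_unused example_dev_mem_read(void *out, phys_addr_t addr, size_t size)
{
	void *ptr = xlate_dev_mem_ptr(addr);

	if (!ptr)
		return -ENOMEM;
	memcpy(out, ptr, size);
	unxlate_dev_mem_ptr(addr, ptr);
	return 0;
}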