usercopy_32.c

/*
 * User address space access functions.
 * The non-inlined parts of asm-i386/uaccess.h are here.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/interrupt.h>
#include <asm/uaccess.h>
#include <asm/mmx.h>

#ifdef CONFIG_X86_INTEL_USERCOPY
/*
 * Alignment at which movsl is preferred for bulk memory copies.
 */
struct movsl_mask movsl_mask __read_mostly;
#endif

static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask))
		return 0;
#endif
	return 1;
}
#define movsl_is_ok(a1, a2, n) \
	__movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n))
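
/*
 * Illustrative sketch, not part of the original file: how the
 * movsl_is_ok() heuristic behaves. The mask value is this example's
 * assumption; movsl_mask.mask is set at CPU setup, and an 8-byte
 * alignment mask (7) is typical where this path is enabled.
 */
#if 0
static void movsl_is_ok_demo(void)
{
	/* Assuming movsl_mask.mask == 7: */
	movsl_is_ok((void *)0x1000, (void *)0x2008, 128);	/* 1: same alignment mod 8 */
	movsl_is_ok((void *)0x1000, (void *)0x2004, 128);	/* 0: alignments differ by 4 */
	movsl_is_ok((void *)0x1000, (void *)0x2004, 32);	/* 1: n < 64, check skipped */
}
#endif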

/*
 * Zero Userspace
 */
#define __do_clear_user(addr, size) \
do { \
	int __d0; \
	might_fault(); \
	__asm__ __volatile__( \
		"0: rep; stosl\n" \
		" movl %2,%0\n" \
		"1: rep; stosb\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3: lea 0(%2,%0,4),%0\n" \
		" jmp 2b\n" \
		".previous\n" \
		_ASM_EXTABLE(0b,3b) \
		_ASM_EXTABLE(1b,2b) \
		: "=&c"(size), "=&D" (__d0) \
		: "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \
} while (0)

/**
 * clear_user: - Zero a block of memory in user space.
 * @to: Destination address, in user space.
 * @n: Number of bytes to zero.
 *
 * Zero a block of memory in user space.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long
clear_user(void __user *to, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n))
		__do_clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(clear_user);
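
/*
 * Illustrative only, hypothetical caller (not in the original file):
 * the usual way the "bytes not cleared" return value is consumed.
 */
#if 0
static int example_zero_user_buffer(void __user *ubuf, unsigned long len)
{
	/* clear_user() returns the number of bytes that could NOT be zeroed. */
	if (clear_user(ubuf, len))
		return -EFAULT;
	return 0;
}
#endif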

/**
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to: Destination address, in user space.
 * @n: Number of bytes to zero.
 *
 * Zero a block of memory in user space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long
__clear_user(void __user *to, unsigned long n)
{
	__do_clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(__clear_user);
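
/*
 * Illustrative only, hypothetical caller: when a range has already been
 * validated, the unchecked variant skips a second access_ok().
 */
#if 0
static int example_zero_checked(void __user *ubuf, unsigned long len)
{
	if (!access_ok(VERIFY_WRITE, ubuf, len))
		return -EFAULT;
	/* Range validated above, so the lighter __clear_user() is safe. */
	return __clear_user(ubuf, len) ? -EFAULT : 0;
}
#endif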

/**
 * strnlen_user: - Get the size of a string in user space.
 * @s: The string to measure.
 * @n: The maximum valid length
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */
long strnlen_user(const char __user *s, long n)
{
	unsigned long mask = -__addr_ok(s);
	unsigned long res, tmp;

	might_fault();

	__asm__ __volatile__(
		" testl %0, %0\n"
		" jz 3f\n"
		" andl %0,%%ecx\n"
		"0: repne; scasb\n"
		" setne %%al\n"
		" subl %%ecx,%0\n"
		" addl %0,%%eax\n"
		"1:\n"
		".section .fixup,\"ax\"\n"
		"2: xorl %%eax,%%eax\n"
		" jmp 1b\n"
		"3: movb $1,%%al\n"
		" jmp 1b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .align 4\n"
		" .long 0b,2b\n"
		".previous"
		:"=&r" (n), "=&D" (s), "=&a" (res), "=&c" (tmp)
		:"0" (n), "1" (s), "2" (0), "3" (mask)
		:"cc");
	return res & mask;
}
EXPORT_SYMBOL(strnlen_user);
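
/*
 * Illustrative only, hypothetical caller: distinguishing the three
 * return cases documented above (the errno choices are this example's
 * assumption, not mandated by strnlen_user() itself).
 */
#if 0
static long example_user_string_len(const char __user *ustr, long maxlen)
{
	long len = strnlen_user(ustr, maxlen);

	if (len == 0)		/* faulted while scanning */
		return -EFAULT;
	if (len > maxlen)	/* no NUL within maxlen bytes */
		return -EINVAL;
	return len;		/* includes the terminating NUL */
}
#endif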

#ifdef CONFIG_X86_INTEL_USERCOPY
static unsigned long
__copy_user_intel(void __user *to, const void *from, unsigned long size)
{
	int d0, d1;
	__asm__ __volatile__(
		" .align 2,0x90\n"
		"1: movl 32(%4), %%eax\n"
		" cmpl $67, %0\n"
		" jbe 3f\n"
		"2: movl 64(%4), %%eax\n"
		" .align 2,0x90\n"
		"3: movl 0(%4), %%eax\n"
		"4: movl 4(%4), %%edx\n"
		"5: movl %%eax, 0(%3)\n"
		"6: movl %%edx, 4(%3)\n"
		"7: movl 8(%4), %%eax\n"
		"8: movl 12(%4),%%edx\n"
		"9: movl %%eax, 8(%3)\n"
		"10: movl %%edx, 12(%3)\n"
		"11: movl 16(%4), %%eax\n"
		"12: movl 20(%4), %%edx\n"
		"13: movl %%eax, 16(%3)\n"
		"14: movl %%edx, 20(%3)\n"
		"15: movl 24(%4), %%eax\n"
		"16: movl 28(%4), %%edx\n"
		"17: movl %%eax, 24(%3)\n"
		"18: movl %%edx, 28(%3)\n"
		"19: movl 32(%4), %%eax\n"
		"20: movl 36(%4), %%edx\n"
		"21: movl %%eax, 32(%3)\n"
		"22: movl %%edx, 36(%3)\n"
		"23: movl 40(%4), %%eax\n"
		"24: movl 44(%4), %%edx\n"
		"25: movl %%eax, 40(%3)\n"
		"26: movl %%edx, 44(%3)\n"
		"27: movl 48(%4), %%eax\n"
		"28: movl 52(%4), %%edx\n"
		"29: movl %%eax, 48(%3)\n"
		"30: movl %%edx, 52(%3)\n"
		"31: movl 56(%4), %%eax\n"
		"32: movl 60(%4), %%edx\n"
		"33: movl %%eax, 56(%3)\n"
		"34: movl %%edx, 60(%3)\n"
		" addl $-64, %0\n"
		" addl $64, %4\n"
		" addl $64, %3\n"
		" cmpl $63, %0\n"
		" ja 1b\n"
		"35: movl %0, %%eax\n"
		" shrl $2, %0\n"
		" andl $3, %%eax\n"
		" cld\n"
		"99: rep; movsl\n"
		"36: movl %%eax, %0\n"
		"37: rep; movsb\n"
		"100:\n"
		".section .fixup,\"ax\"\n"
		"101: lea 0(%%eax,%0,4),%0\n"
		" jmp 100b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .align 4\n"
		" .long 1b,100b\n"
		" .long 2b,100b\n"
		" .long 3b,100b\n"
		" .long 4b,100b\n"
		" .long 5b,100b\n"
		" .long 6b,100b\n"
		" .long 7b,100b\n"
		" .long 8b,100b\n"
		" .long 9b,100b\n"
		" .long 10b,100b\n"
		" .long 11b,100b\n"
		" .long 12b,100b\n"
		" .long 13b,100b\n"
		" .long 14b,100b\n"
		" .long 15b,100b\n"
		" .long 16b,100b\n"
		" .long 17b,100b\n"
		" .long 18b,100b\n"
		" .long 19b,100b\n"
		" .long 20b,100b\n"
		" .long 21b,100b\n"
		" .long 22b,100b\n"
		" .long 23b,100b\n"
		" .long 24b,100b\n"
		" .long 25b,100b\n"
		" .long 26b,100b\n"
		" .long 27b,100b\n"
		" .long 28b,100b\n"
		" .long 29b,100b\n"
		" .long 30b,100b\n"
		" .long 31b,100b\n"
		" .long 32b,100b\n"
		" .long 33b,100b\n"
		" .long 34b,100b\n"
		" .long 35b,100b\n"
		" .long 36b,100b\n"
		" .long 37b,100b\n"
		" .long 99b,101b\n"
		".previous"
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}

static unsigned long
__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
{
	int d0, d1;
	__asm__ __volatile__(
		" .align 2,0x90\n"
		"0: movl 32(%4), %%eax\n"
		" cmpl $67, %0\n"
		" jbe 2f\n"
		"1: movl 64(%4), %%eax\n"
		" .align 2,0x90\n"
		"2: movl 0(%4), %%eax\n"
		"21: movl 4(%4), %%edx\n"
		" movl %%eax, 0(%3)\n"
		" movl %%edx, 4(%3)\n"
		"3: movl 8(%4), %%eax\n"
		"31: movl 12(%4),%%edx\n"
		" movl %%eax, 8(%3)\n"
		" movl %%edx, 12(%3)\n"
		"4: movl 16(%4), %%eax\n"
		"41: movl 20(%4), %%edx\n"
		" movl %%eax, 16(%3)\n"
		" movl %%edx, 20(%3)\n"
		"10: movl 24(%4), %%eax\n"
		"51: movl 28(%4), %%edx\n"
		" movl %%eax, 24(%3)\n"
		" movl %%edx, 28(%3)\n"
		"11: movl 32(%4), %%eax\n"
		"61: movl 36(%4), %%edx\n"
		" movl %%eax, 32(%3)\n"
		" movl %%edx, 36(%3)\n"
		"12: movl 40(%4), %%eax\n"
		"71: movl 44(%4), %%edx\n"
		" movl %%eax, 40(%3)\n"
		" movl %%edx, 44(%3)\n"
		"13: movl 48(%4), %%eax\n"
		"81: movl 52(%4), %%edx\n"
		" movl %%eax, 48(%3)\n"
		" movl %%edx, 52(%3)\n"
		"14: movl 56(%4), %%eax\n"
		"91: movl 60(%4), %%edx\n"
		" movl %%eax, 56(%3)\n"
		" movl %%edx, 60(%3)\n"
		" addl $-64, %0\n"
		" addl $64, %4\n"
		" addl $64, %3\n"
		" cmpl $63, %0\n"
		" ja 0b\n"
		"5: movl %0, %%eax\n"
		" shrl $2, %0\n"
		" andl $3, %%eax\n"
		" cld\n"
		"6: rep; movsl\n"
		" movl %%eax,%0\n"
		"7: rep; movsb\n"
		"8:\n"
		".section .fixup,\"ax\"\n"
		"9: lea 0(%%eax,%0,4),%0\n"
		"16: pushl %0\n"
		" pushl %%eax\n"
		" xorl %%eax,%%eax\n"
		" rep; stosb\n"
		" popl %%eax\n"
		" popl %0\n"
		" jmp 8b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .align 4\n"
		" .long 0b,16b\n"
		" .long 1b,16b\n"
		" .long 2b,16b\n"
		" .long 21b,16b\n"
		" .long 3b,16b\n"
		" .long 31b,16b\n"
		" .long 4b,16b\n"
		" .long 41b,16b\n"
		" .long 10b,16b\n"
		" .long 51b,16b\n"
		" .long 11b,16b\n"
		" .long 61b,16b\n"
		" .long 12b,16b\n"
		" .long 71b,16b\n"
		" .long 13b,16b\n"
		" .long 81b,16b\n"
		" .long 14b,16b\n"
		" .long 91b,16b\n"
		" .long 6b,9b\n"
		" .long 7b,16b\n"
		".previous"
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}

/*
 * Non-temporal-hint version of __copy_user_zeroing_intel. It is cache-aware.
 * hyoshiok@miraclelinux.com
 */
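
/*
 * Editorial note (added): the movnti stores used below are non-temporal,
 * so the destination lines are not pulled into the caches during a bulk
 * copy, and the sfence after the 64-byte loop makes those weakly-ordered
 * stores globally visible before the ordinary rep;movs tail runs. This
 * pays off only for large copies whose destination is not read back soon,
 * which is why the callers take this path just for n > 64 on SSE2-capable
 * (cpu_has_xmm2) CPUs.
 */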
static unsigned long __copy_user_zeroing_intel_nocache(void *to,
				const void __user *from, unsigned long size)
{
	int d0, d1;
	__asm__ __volatile__(
		" .align 2,0x90\n"
		"0: movl 32(%4), %%eax\n"
		" cmpl $67, %0\n"
		" jbe 2f\n"
		"1: movl 64(%4), %%eax\n"
		" .align 2,0x90\n"
		"2: movl 0(%4), %%eax\n"
		"21: movl 4(%4), %%edx\n"
		" movnti %%eax, 0(%3)\n"
		" movnti %%edx, 4(%3)\n"
		"3: movl 8(%4), %%eax\n"
		"31: movl 12(%4),%%edx\n"
		" movnti %%eax, 8(%3)\n"
		" movnti %%edx, 12(%3)\n"
		"4: movl 16(%4), %%eax\n"
		"41: movl 20(%4), %%edx\n"
		" movnti %%eax, 16(%3)\n"
		" movnti %%edx, 20(%3)\n"
		"10: movl 24(%4), %%eax\n"
		"51: movl 28(%4), %%edx\n"
		" movnti %%eax, 24(%3)\n"
		" movnti %%edx, 28(%3)\n"
		"11: movl 32(%4), %%eax\n"
		"61: movl 36(%4), %%edx\n"
		" movnti %%eax, 32(%3)\n"
		" movnti %%edx, 36(%3)\n"
		"12: movl 40(%4), %%eax\n"
		"71: movl 44(%4), %%edx\n"
		" movnti %%eax, 40(%3)\n"
		" movnti %%edx, 44(%3)\n"
		"13: movl 48(%4), %%eax\n"
		"81: movl 52(%4), %%edx\n"
		" movnti %%eax, 48(%3)\n"
		" movnti %%edx, 52(%3)\n"
		"14: movl 56(%4), %%eax\n"
		"91: movl 60(%4), %%edx\n"
		" movnti %%eax, 56(%3)\n"
		" movnti %%edx, 60(%3)\n"
		" addl $-64, %0\n"
		" addl $64, %4\n"
		" addl $64, %3\n"
		" cmpl $63, %0\n"
		" ja 0b\n"
		" sfence \n"
		"5: movl %0, %%eax\n"
		" shrl $2, %0\n"
		" andl $3, %%eax\n"
		" cld\n"
		"6: rep; movsl\n"
		" movl %%eax,%0\n"
		"7: rep; movsb\n"
		"8:\n"
		".section .fixup,\"ax\"\n"
		"9: lea 0(%%eax,%0,4),%0\n"
		"16: pushl %0\n"
		" pushl %%eax\n"
		" xorl %%eax,%%eax\n"
		" rep; stosb\n"
		" popl %%eax\n"
		" popl %0\n"
		" jmp 8b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .align 4\n"
		" .long 0b,16b\n"
		" .long 1b,16b\n"
		" .long 2b,16b\n"
		" .long 21b,16b\n"
		" .long 3b,16b\n"
		" .long 31b,16b\n"
		" .long 4b,16b\n"
		" .long 41b,16b\n"
		" .long 10b,16b\n"
		" .long 51b,16b\n"
		" .long 11b,16b\n"
		" .long 61b,16b\n"
		" .long 12b,16b\n"
		" .long 71b,16b\n"
		" .long 13b,16b\n"
		" .long 81b,16b\n"
		" .long 14b,16b\n"
		" .long 91b,16b\n"
		" .long 6b,9b\n"
		" .long 7b,16b\n"
		".previous"
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}

static unsigned long __copy_user_intel_nocache(void *to,
				const void __user *from, unsigned long size)
{
	int d0, d1;
	__asm__ __volatile__(
		" .align 2,0x90\n"
		"0: movl 32(%4), %%eax\n"
		" cmpl $67, %0\n"
		" jbe 2f\n"
		"1: movl 64(%4), %%eax\n"
		" .align 2,0x90\n"
		"2: movl 0(%4), %%eax\n"
		"21: movl 4(%4), %%edx\n"
		" movnti %%eax, 0(%3)\n"
		" movnti %%edx, 4(%3)\n"
		"3: movl 8(%4), %%eax\n"
		"31: movl 12(%4),%%edx\n"
		" movnti %%eax, 8(%3)\n"
		" movnti %%edx, 12(%3)\n"
		"4: movl 16(%4), %%eax\n"
		"41: movl 20(%4), %%edx\n"
		" movnti %%eax, 16(%3)\n"
		" movnti %%edx, 20(%3)\n"
		"10: movl 24(%4), %%eax\n"
		"51: movl 28(%4), %%edx\n"
		" movnti %%eax, 24(%3)\n"
		" movnti %%edx, 28(%3)\n"
		"11: movl 32(%4), %%eax\n"
		"61: movl 36(%4), %%edx\n"
		" movnti %%eax, 32(%3)\n"
		" movnti %%edx, 36(%3)\n"
		"12: movl 40(%4), %%eax\n"
		"71: movl 44(%4), %%edx\n"
		" movnti %%eax, 40(%3)\n"
		" movnti %%edx, 44(%3)\n"
		"13: movl 48(%4), %%eax\n"
		"81: movl 52(%4), %%edx\n"
		" movnti %%eax, 48(%3)\n"
		" movnti %%edx, 52(%3)\n"
		"14: movl 56(%4), %%eax\n"
		"91: movl 60(%4), %%edx\n"
		" movnti %%eax, 56(%3)\n"
		" movnti %%edx, 60(%3)\n"
		" addl $-64, %0\n"
		" addl $64, %4\n"
		" addl $64, %3\n"
		" cmpl $63, %0\n"
		" ja 0b\n"
		" sfence \n"
		"5: movl %0, %%eax\n"
		" shrl $2, %0\n"
		" andl $3, %%eax\n"
		" cld\n"
		"6: rep; movsl\n"
		" movl %%eax,%0\n"
		"7: rep; movsb\n"
		"8:\n"
		".section .fixup,\"ax\"\n"
		"9: lea 0(%%eax,%0,4),%0\n"
		"16: jmp 8b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .align 4\n"
		" .long 0b,16b\n"
		" .long 1b,16b\n"
		" .long 2b,16b\n"
		" .long 21b,16b\n"
		" .long 3b,16b\n"
		" .long 31b,16b\n"
		" .long 4b,16b\n"
		" .long 41b,16b\n"
		" .long 10b,16b\n"
		" .long 51b,16b\n"
		" .long 11b,16b\n"
		" .long 61b,16b\n"
		" .long 12b,16b\n"
		" .long 71b,16b\n"
		" .long 13b,16b\n"
		" .long 81b,16b\n"
		" .long 14b,16b\n"
		" .long 91b,16b\n"
		" .long 6b,9b\n"
		" .long 7b,16b\n"
		".previous"
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}
#else
/*
 * Leave these declared but undefined. There should be no
 * references to them.
 */
unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
					unsigned long size);
unsigned long __copy_user_intel(void __user *to, const void *from,
					unsigned long size);
unsigned long __copy_user_zeroing_intel_nocache(void *to,
				const void __user *from, unsigned long size);
#endif /* CONFIG_X86_INTEL_USERCOPY */

/* Generic arbitrary sized copy. */
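/*
 * Added commentary: for copies of more than 7 bytes the destination is
 * first aligned to an 8-byte boundary with byte moves, the bulk is then
 * transferred as longwords with rep;movsl, and the final (size & 3)
 * bytes go with rep;movsb. On a fault the fixup code computes the number
 * of uncopied bytes and leaves it in "size", which is what the callers
 * return.
 */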
#define __copy_user(to, from, size) \
do { \
	int __d0, __d1, __d2; \
	__asm__ __volatile__( \
		" cmp $7,%0\n" \
		" jbe 1f\n" \
		" movl %1,%0\n" \
		" negl %0\n" \
		" andl $7,%0\n" \
		" subl %0,%3\n" \
		"4: rep; movsb\n" \
		" movl %3,%0\n" \
		" shrl $2,%0\n" \
		" andl $3,%3\n" \
		" .align 2,0x90\n" \
		"0: rep; movsl\n" \
		" movl %3,%0\n" \
		"1: rep; movsb\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"5: addl %3,%0\n" \
		" jmp 2b\n" \
		"3: lea 0(%3,%0,4),%0\n" \
		" jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		" .align 4\n" \
		" .long 4b,5b\n" \
		" .long 0b,3b\n" \
		" .long 1b,2b\n" \
		".previous" \
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
		: "3"(size), "0"(size), "1"(to), "2"(from) \
		: "memory"); \
} while (0)

#define __copy_user_zeroing(to, from, size) \
do { \
	int __d0, __d1, __d2; \
	__asm__ __volatile__( \
		" cmp $7,%0\n" \
		" jbe 1f\n" \
		" movl %1,%0\n" \
		" negl %0\n" \
		" andl $7,%0\n" \
		" subl %0,%3\n" \
		"4: rep; movsb\n" \
		" movl %3,%0\n" \
		" shrl $2,%0\n" \
		" andl $3,%3\n" \
		" .align 2,0x90\n" \
		"0: rep; movsl\n" \
		" movl %3,%0\n" \
		"1: rep; movsb\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"5: addl %3,%0\n" \
		" jmp 6f\n" \
		"3: lea 0(%3,%0,4),%0\n" \
		"6: pushl %0\n" \
		" pushl %%eax\n" \
		" xorl %%eax,%%eax\n" \
		" rep; stosb\n" \
		" popl %%eax\n" \
		" popl %0\n" \
		" jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		" .align 4\n" \
		" .long 4b,5b\n" \
		" .long 0b,3b\n" \
		" .long 1b,6b\n" \
		".previous" \
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
		: "3"(size), "0"(size), "1"(to), "2"(from) \
		: "memory"); \
} while (0)

unsigned long __copy_to_user_ll(void __user *to, const void *from,
				unsigned long n)
{
#ifndef CONFIG_X86_WP_WORKS_OK
	if (unlikely(boot_cpu_data.wp_works_ok == 0) &&
			((unsigned long)to) < TASK_SIZE) {
		/*
		 * When we are in an atomic section (see
		 * mm/filemap.c:file_read_actor), return the full
		 * length to take the slow path.
		 */
		if (in_atomic())
			return n;

		/*
		 * The CPU does not honor the WP bit when writing from
		 * supervisor mode, and due to preemption or SMP the page
		 * tables can change at any time, so do it manually.
		 * Manfred <manfred@colorfullife.com>
		 */
		while (n) {
			unsigned long offset = ((unsigned long)to) % PAGE_SIZE;
			unsigned long len = PAGE_SIZE - offset;
			int retval;
			struct page *pg;
			void *maddr;

			if (len > n)
				len = n;

survive:
			down_read(&current->mm->mmap_sem);
			retval = get_user_pages(current, current->mm,
					(unsigned long)to, 1, 1, 0, &pg, NULL);

			if (retval == -ENOMEM && is_global_init(current)) {
				up_read(&current->mm->mmap_sem);
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto survive;
			}

			if (retval != 1) {
				up_read(&current->mm->mmap_sem);
				break;
			}

			maddr = kmap_atomic(pg);
			memcpy(maddr + offset, from, len);
			kunmap_atomic(maddr);
			set_page_dirty_lock(pg);
			put_page(pg);
			up_read(&current->mm->mmap_sem);

			from += len;
			to += len;
			n -= len;
		}
		return n;
	}
#endif
	if (movsl_is_ok(to, from, n))
		__copy_user(to, from, n);
	else
		n = __copy_user_intel(to, from, n);
	return n;
}
EXPORT_SYMBOL(__copy_to_user_ll);

unsigned long __copy_from_user_ll(void *to, const void __user *from,
				unsigned long n)
{
	if (movsl_is_ok(to, from, n))
		__copy_user_zeroing(to, from, n);
	else
		n = __copy_user_zeroing_intel(to, from, n);
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll);

unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
				unsigned long n)
{
	if (movsl_is_ok(to, from, n))
		__copy_user(to, from, n);
	else
		n = __copy_user_intel((void __user *)to,
				(const void *)from, n);
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nozero);

unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
				unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n > 64 && cpu_has_xmm2)
		n = __copy_user_zeroing_intel_nocache(to, from, n);
	else
		__copy_user_zeroing(to, from, n);
#else
	__copy_user_zeroing(to, from, n);
#endif
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nocache);

unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
				unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n > 64 && cpu_has_xmm2)
		n = __copy_user_intel_nocache(to, from, n);
	else
		__copy_user(to, from, n);
#else
	__copy_user(to, from, n);
#endif
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);

/**
 * copy_to_user: - Copy a block of data into user space.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __copy_to_user(to, from, n);
	return n;
}
EXPORT_SYMBOL(copy_to_user);
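
/*
 * Illustrative only, hypothetical ioctl-style caller (struct and
 * function names invented for the example): mapping the "bytes not
 * copied" return value to -EFAULT.
 */
#if 0
struct example_stats {
	unsigned long packets;
	unsigned long bytes;
};

static long example_get_stats(struct example_stats __user *ubuf)
{
	struct example_stats stats = { .packets = 0, .bytes = 0 };

	/* A non-zero return means part of the copy faulted. */
	if (copy_to_user(ubuf, &stats, sizeof(stats)))
		return -EFAULT;
	return 0;
}
#endif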

/**
 * copy_from_user: - Copy a block of data from user space.
 * @to: Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		n = __copy_from_user(to, from, n);
	else
		memset(to, 0, n);
	return n;
}
EXPORT_SYMBOL(_copy_from_user);
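
/*
 * Illustrative only, hypothetical caller (copy_from_user() is the inline
 * wrapper that ends up in _copy_from_user() above): even on a partial
 * fault the kernel buffer is fully defined, because the uncopied tail is
 * zero-padded rather than left as stale stack data.
 */
#if 0
static long example_read_config(const void __user *ubuf, unsigned long len)
{
	char buf[64];

	if (len > sizeof(buf))
		return -EINVAL;
	if (copy_from_user(buf, ubuf, len))
		return -EFAULT;	/* buf is still zero-padded here */
	/* ... parse buf ... */
	return 0;
}
#endif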

void copy_from_user_overflow(void)
{
	WARN(1, "Buffer overflow detected!\n");
}
EXPORT_SYMBOL(copy_from_user_overflow);