checksum.h

/*
 * include/asm-xtensa/checksum.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_CHECKSUM_H
#define _XTENSA_CHECKSUM_H

#include <linux/in6.h>
#include <variant/core.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
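
/*
 * Illustrative usage (not part of the original header; buffer names are
 * hypothetical): a checksum can be accumulated over several fragments by
 * feeding the running sum back into csum_partial, e.g.
 *
 *      __wsum sum = csum_partial(frag0, len0, 0);
 *      sum = csum_partial(frag1, len1, sum);
 *      __sum16 folded = csum_fold(sum);
 *
 * Only the last fragment may have an odd length.
 */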

/*
 * the same as csum_partial, but copies from src while it
 * checksums, and handles user-space pointer exceptions correctly, when needed.
 *
 * here it is even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 */
asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
                                            int len, __wsum sum,
                                            int *src_err_ptr, int *dst_err_ptr);

/*
 * Note: when you get a NULL pointer exception here this means someone
 * passed in an incorrect kernel address to one of these functions.
 *
 * If you use these functions directly please don't forget the access_ok().
 */
static inline
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
                                 int len, __wsum sum)
{
        return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
}

static inline
__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
                                   int len, __wsum sum, int *err_ptr)
{
        return csum_partial_copy_generic((__force const void *)src, dst,
                                         len, sum, err_ptr, NULL);
}

/*
 * Fold a partial checksum
 */
static __inline__ __sum16 csum_fold(__wsum sum)
{
        unsigned int __dummy;

        __asm__("extui %1, %0, 16, 16\n\t"
                "extui %0, %0, 0, 16\n\t"
                "add %0, %0, %1\n\t"
                "slli %1, %0, 16\n\t"
                "add %0, %0, %1\n\t"
                "extui %0, %0, 16, 16\n\t"
                "neg %0, %0\n\t"
                "addi %0, %0, -1\n\t"
                "extui %0, %0, 0, 16\n\t"
                : "=r" (sum), "=&r" (__dummy)
                : "0" (sum));
        return (__force __sum16)sum;
}
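
/*
 * Worked example (illustrative, not from the original source): folding
 * 0x12345678 adds the two halves, 0x1234 + 0x5678 = 0x68ac (any carry out
 * of bit 15 would be wrapped back around), and returns the ones' complement
 * of that value, 0x9753.
 */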

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.
 */
static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
        unsigned int sum, tmp, endaddr;

        __asm__ __volatile__(
                "sub %0, %0, %0\n\t"
#if XCHAL_HAVE_LOOPS
                "loopgtz %2, 2f\n\t"
#else
                "beqz %2, 2f\n\t"
                "slli %4, %2, 2\n\t"
                "add %4, %4, %1\n\t"
                "0:\t"
#endif
                "l32i %3, %1, 0\n\t"
                "add %0, %0, %3\n\t"
                "bgeu %0, %3, 1f\n\t"
                "addi %0, %0, 1\n\t"
                "1:\t"
                "addi %1, %1, 4\n\t"
#if !XCHAL_HAVE_LOOPS
                "blt %1, %4, 0b\n\t"
#endif
                "2:\t"
        /* Since the input registers which are loaded with iph and ihl
           are modified, we must also specify them as outputs, or gcc
           will assume they contain their original values. */
                : "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp), "=&r" (endaddr)
                : "1" (iph), "2" (ihl)
                : "memory");
        return csum_fold(sum);
}
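
/*
 * Illustrative usage (not part of the original header; 'iph' is a
 * hypothetical pointer to a received IPv4 header): a header whose checksum
 * field is correct sums to all ones, so after folding the result is zero:
 *
 *      if (ip_fast_csum(iph, iph->ihl))
 *              goto drop;      // non-zero means the header checksum is bad
 */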

static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
                                            unsigned short len,
                                            unsigned short proto,
                                            __wsum sum)
{
#ifdef __XTENSA_EL__
        unsigned long len_proto = (len + proto) << 8;
#elif defined(__XTENSA_EB__)
        unsigned long len_proto = len + proto;
#else
# error processor byte order undefined!
#endif
        __asm__("add %0, %0, %1\n\t"
                "bgeu %0, %1, 1f\n\t"
                "addi %0, %0, 1\n\t"
                "1:\t"
                "add %0, %0, %2\n\t"
                "bgeu %0, %2, 1f\n\t"
                "addi %0, %0, 1\n\t"
                "1:\t"
                "add %0, %0, %3\n\t"
                "bgeu %0, %3, 1f\n\t"
                "addi %0, %0, 1\n\t"
                "1:\t"
                : "=r" (sum), "=r" (len_proto)
                : "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum));
        return sum;
}

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static __inline__ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
                                            unsigned short len,
                                            unsigned short proto,
                                            __wsum sum)
{
        return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
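
/*
 * Illustrative usage (not part of the original header; names are
 * hypothetical): a UDP checksum is typically built by summing the UDP
 * header and payload first and then mixing in the pseudo-header, e.g.
 *
 *      __wsum sum = csum_partial(uh, udp_len, 0);
 *      uh->check = csum_tcpudp_magic(saddr, daddr, udp_len,
 *                                    IPPROTO_UDP, sum);
 *
 * (UDP additionally transmits 0xffff in place of a computed 0.)
 */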

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static __inline__ __sum16 ip_compute_csum(const void *buff, int len)
{
        return csum_fold(csum_partial(buff, len, 0));
}
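
/*
 * Illustrative usage (not part of the original header; field names are
 * hypothetical): an ICMP message has no pseudo-header, so the whole
 * message is summed directly:
 *
 *      icmph->checksum = 0;
 *      icmph->checksum = ip_compute_csum(icmph, icmp_len);
 */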

#define _HAVE_ARCH_IPV6_CSUM
static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
                                          const struct in6_addr *daddr,
                                          __u32 len, unsigned short proto,
                                          __wsum sum)
{
        unsigned int __dummy;

        __asm__("l32i %1, %2, 0\n\t"
                "add %0, %0, %1\n\t"
                "bgeu %0, %1, 1f\n\t"
                "addi %0, %0, 1\n\t"
                "1:\t"
                "l32i %1, %2, 4\n\t"
                "add %0, %0, %1\n\t"
                "bgeu %0, %1, 1f\n\t"
                "addi %0, %0, 1\n\t"
                "1:\t"
                "l32i %1, %2, 8\n\t"
                "add %0, %0, %1\n\t"
                "bgeu %0, %1, 1f\n\t"
                "addi %0, %0, 1\n\t"
                "1:\t"
                "l32i %1, %2, 12\n\t"
                "add %0, %0, %1\n\t"
                "bgeu %0, %1, 1f\n\t"
                "addi %0, %0, 1\n\t"
                "1:\t"
                "l32i %1, %3, 0\n\t"
                "add %0, %0, %1\n\t"
                "bgeu %0, %1, 1f\n\t"
                "addi %0, %0, 1\n\t"
                "1:\t"
                "l32i %1, %3, 4\n\t"
                "add %0, %0, %1\n\t"
                "bgeu %0, %1, 1f\n\t"
                "addi %0, %0, 1\n\t"
                "1:\t"
                "l32i %1, %3, 8\n\t"
                "add %0, %0, %1\n\t"
                "bgeu %0, %1, 1f\n\t"
                "addi %0, %0, 1\n\t"
                "1:\t"
                "l32i %1, %3, 12\n\t"
                "add %0, %0, %1\n\t"
                "bgeu %0, %1, 1f\n\t"
                "addi %0, %0, 1\n\t"
                "1:\t"
                "add %0, %0, %4\n\t"
                "bgeu %0, %4, 1f\n\t"
                "addi %0, %0, 1\n\t"
                "1:\t"
                "add %0, %0, %5\n\t"
                "bgeu %0, %5, 1f\n\t"
                "addi %0, %0, 1\n\t"
                "1:\t"
                : "=r" (sum), "=&r" (__dummy)
                : "r" (saddr), "r" (daddr),
                  "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
                : "memory");

        return csum_fold(sum);
}
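
/*
 * Illustrative usage (not part of the original header; names are
 * hypothetical): for TCP over IPv6 the segment is summed with
 * csum_partial and the IPv6 pseudo-header is folded in last:
 *
 *      th->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, tcp_len,
 *                                  IPPROTO_TCP,
 *                                  csum_partial(th, tcp_len, 0));
 */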

/*
 * Copy and checksum to user
 */
#define HAVE_CSUM_COPY_USER
static __inline__ __wsum csum_and_copy_to_user(const void *src,
                                               void __user *dst, int len,
                                               __wsum sum, int *err_ptr)
{
        if (access_ok(VERIFY_WRITE, dst, len))
                return csum_partial_copy_generic(src, dst, len, sum,
                                                 NULL, err_ptr);

        if (len)
                *err_ptr = -EFAULT;

        return (__force __wsum)-1; /* invalid checksum */
}
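
/*
 * Illustrative usage (not part of the original header; names are
 * hypothetical): callers are expected to check the error flag after the
 * copy rather than the returned sum:
 *
 *      int err = 0;
 *      __wsum sum = csum_and_copy_to_user(kbuf, ubuf, len, 0, &err);
 *      if (err)
 *              return -EFAULT;
 */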

#endif