checksum.h

/*
 *  S390 fast network checksum routines
 *
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Ulrich Hild        (first version)
 *               Martin Schwidefsky (heavily optimized CKSM version)
 *               D.J. Barrow        (third attempt)
 */

#ifndef _S390_CHECKSUM_H
#define _S390_CHECKSUM_H

#include <asm/uaccess.h>
/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
static inline __wsum
csum_partial(const void *buff, int len, __wsum sum)
{
        register unsigned long reg2 asm("2") = (unsigned long) buff;
        register unsigned long reg3 asm("3") = (unsigned long) len;

        asm volatile(
                "0:     cksm    %0,%1\n"        /* do checksum on longs */
                "       jo      0b\n"
                : "+d" (sum), "+d" (reg2), "+d" (reg3) : : "cc", "memory");
        return sum;
}
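
/*
 * For reference, a portable C sketch of what the CKSM loop computes
 * (ours, not part of the original header, and kept out of the build):
 * an end-around-carry (ones' complement) sum over the buffer, added
 * to "sum".  It assumes big-endian byte order as on s390; the 32-bit
 * intermediate may differ from CKSM's, but csum_fold() of either
 * yields the same 16-bit checksum.
 */
#if 0   /* illustrative sketch only, not built */
static inline __wsum csum_partial_ref(const void *buff, int len, __wsum sum)
{
        const unsigned char *p = buff;
        u64 acc = (__force u32) sum;

        while (len >= 2) {                      /* big-endian 16-bit words */
                acc += ((u32) p[0] << 8) | p[1];
                p += 2;
                len -= 2;
        }
        if (len)                                /* odd trailing byte, zero-padded */
                acc += (u32) p[0] << 8;
        while (acc >> 32)                       /* fold carries back in (end-around) */
                acc = (acc & 0xffffffffULL) + (acc >> 32);
        return (__force __wsum) (u32) acc;
}
#endif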
/*
 * the same as csum_partial_copy_nocheck below, but copies from user
 * space and computes the checksum over the kernel-side copy.
 *
 * here it is even more important to align src and dst on a 32-bit
 * (or, even better, a 64-bit) boundary
 */
static inline __wsum
csum_partial_copy_from_user(const void __user *src, void *dst,
                            int len, __wsum sum,
                            int *err_ptr)
{
        if (unlikely(copy_from_user(dst, src, len)))
                *err_ptr = -EFAULT;
        return csum_partial(dst, len, sum);
}
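
/*
 * Caller-side sketch (ours, not from this file): the checksum is
 * computed over dst even when the copy faults, so *err_ptr must be
 * checked before the result is trusted.
 *
 *      int err = 0;
 *      __wsum csum = csum_partial_copy_from_user(usrc, kbuf, len,
 *                                                0, &err);
 *      if (err)
 *              return err;     // -EFAULT: kbuf and csum are unusable
 */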
static inline __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
        memcpy(dst, src, len);
        return csum_partial(dst, len, sum);
}
/*
 * Fold a partial checksum without adding pseudo headers
 */
static inline __sum16 csum_fold(__wsum sum)
{
        u32 csum = (__force u32) sum;

        csum += (csum >> 16) + (csum << 16);
        csum >>= 16;
        return (__force __sum16) ~csum;
}
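
/*
 * Worked example of the fold above (ours, not from the original),
 * for sum = 0x12345678:
 *
 *      csum += (csum >> 16) + (csum << 16)
 *           =  0x12345678 + 0x1234 + 0x56780000 = 0x68ac68ac
 *      csum >>= 16                              -> 0x68ac
 *      ~csum (low 16 bits)                      -> 0x9753
 *
 * which equals ~(0x1234 + 0x5678), the classic 16-bit fold.  The
 * (csum << 16) term makes any carry out of the low half propagate
 * into the high half, so bits 31..16 end up holding the ones'
 * complement sum of both halves.
 */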
/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which are always checksummed on 4-octet boundaries.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
        return csum_fold(csum_partial(iph, ihl * 4, 0));
}
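
/*
 * Usage sketch (ours): verifying a received IPv4 header.  Summing all
 * ihl words, checksum field included, folds to zero for a valid
 * header.
 *
 *      const struct iphdr *iph = ip_hdr(skb);
 *      if (ip_fast_csum(iph, iph->ihl))
 *              goto drop;      // corrupted header
 */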
/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 32-bit checksum
 */
static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, __u8 proto,
                   __wsum sum)
{
        __u32 csum = (__force __u32)sum;

        csum += (__force __u32)saddr;
        if (csum < (__force __u32)saddr)
                csum++;

        csum += (__force __u32)daddr;
        if (csum < (__force __u32)daddr)
                csum++;

        csum += len + proto;
        if (csum < len + proto)
                csum++;

        return (__force __wsum)csum;
}
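
/*
 * Note (ours): each "if (csum < addend) csum++" above is the
 * end-around carry of ones' complement addition: when the unsigned
 * 32-bit add overflows, the result is necessarily smaller than the
 * addend, so the lost carry bit is detected and added back in.
 */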
/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, __u8 proto,
                  __wsum sum)
{
        return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
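
/*
 * End-to-end sketch (ours; udph and ulen are hypothetical names): a
 * transmit-side UDP checksum, chaining csum_partial over the UDP
 * header plus payload into the pseudo-header fold.
 *
 *      __wsum body = csum_partial(udph, ulen, 0);
 *      __sum16 check = csum_tcpudp_magic(saddr, daddr, ulen,
 *                                        IPPROTO_UDP, body);
 *      if (check == 0)
 *              check = CSUM_MANGLED_0; // 0 means "no checksum" for UDP
 */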
/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
        return csum_fold(csum_partial(buff, len, 0));
}

#endif /* _S390_CHECKSUM_H */