/*
 *  linux/net/ipv4/inet_lro.c
 *
 *  Large Receive Offload (ipv4 / tcp)
 *
 *  (C) Copyright IBM Corp. 2007
 *
 *  Authors:
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Christoph Raisch <raisch@de.ibm.com>
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/if_vlan.h>
#include <linux/inet_lro.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jan-Bernd Themann <themann@de.ibm.com>");
MODULE_DESCRIPTION("Large Receive Offload (ipv4 / tcp)");

#define TCP_HDR_LEN(tcph) (tcph->doff << 2)
#define IP_HDR_LEN(iph) (iph->ihl << 2)
#define TCP_PAYLOAD_LENGTH(iph, tcph) \
        (ntohs(iph->tot_len) - IP_HDR_LEN(iph) - TCP_HDR_LEN(tcph))

#define IPH_LEN_WO_OPTIONS 5
#define TCPH_LEN_WO_OPTIONS 5
#define TCPH_LEN_W_TIMESTAMP 8

#define LRO_MAX_PG_HLEN 64

#define LRO_INC_STATS(lro_mgr, attr) { lro_mgr->stats.attr++; }

/*
 * Basic tcp checks whether packet is suitable for LRO
 */
static int lro_tcp_ip_check(const struct iphdr *iph, const struct tcphdr *tcph,
                            int len, const struct net_lro_desc *lro_desc)
{
        /* check ip header: don't aggregate padded frames */
        if (ntohs(iph->tot_len) != len)
                return -1;

        if (TCP_PAYLOAD_LENGTH(iph, tcph) == 0)
                return -1;

        if (iph->ihl != IPH_LEN_WO_OPTIONS)
                return -1;

        if (tcph->cwr || tcph->ece || tcph->urg || !tcph->ack ||
            tcph->rst || tcph->syn || tcph->fin)
                return -1;

        if (INET_ECN_is_ce(ipv4_get_dsfield(iph)))
                return -1;

        if (tcph->doff != TCPH_LEN_WO_OPTIONS &&
            tcph->doff != TCPH_LEN_W_TIMESTAMP)
                return -1;

        /* check tcp options (only timestamp allowed) */
        if (tcph->doff == TCPH_LEN_W_TIMESTAMP) {
                __be32 *topt = (__be32 *)(tcph + 1);

                if (*topt != htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
                                   | (TCPOPT_TIMESTAMP << 8)
                                   | TCPOLEN_TIMESTAMP))
                        return -1;

                /* timestamp should be in right order */
                topt++;
                if (lro_desc && after(ntohl(lro_desc->tcp_rcv_tsval),
                                      ntohl(*topt)))
                        return -1;

                /* timestamp reply should not be zero */
                topt++;
                if (*topt == 0)
                        return -1;
        }

        return 0;
}
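
/*
 * Write the accumulated aggregation state back into the parent packet's
 * headers before it is handed to the stack: latest ACK/window (and
 * timestamp echo reply, if present), the new total length, and freshly
 * computed IP and TCP checksums.
 */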
static void lro_update_tcp_ip_header(struct net_lro_desc *lro_desc)
{
        struct iphdr *iph = lro_desc->iph;
        struct tcphdr *tcph = lro_desc->tcph;
        __be32 *p;
        __wsum tcp_hdr_csum;

        tcph->ack_seq = lro_desc->tcp_ack;
        tcph->window = lro_desc->tcp_window;

        if (lro_desc->tcp_saw_tstamp) {
                p = (__be32 *)(tcph + 1);
                *(p+2) = lro_desc->tcp_rcv_tsecr;
        }

        iph->tot_len = htons(lro_desc->ip_tot_len);

        iph->check = 0;
        iph->check = ip_fast_csum((u8 *)lro_desc->iph, iph->ihl);

        tcph->check = 0;
        tcp_hdr_csum = csum_partial(tcph, TCP_HDR_LEN(tcph), 0);
        lro_desc->data_csum = csum_add(lro_desc->data_csum, tcp_hdr_csum);
        tcph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
                                        lro_desc->ip_tot_len -
                                        IP_HDR_LEN(iph), IPPROTO_TCP,
                                        lro_desc->data_csum);
}
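
/*
 * Recover the checksum of the TCP payload alone by subtracting the TCP
 * header and the pseudo-header from the (unfolded) checksum carried in
 * the packet.
 */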
static __wsum lro_tcp_data_csum(struct iphdr *iph, struct tcphdr *tcph, int len)
{
        __wsum tcp_csum;
        __wsum tcp_hdr_csum;
        __wsum tcp_ps_hdr_csum;

        tcp_csum = ~csum_unfold(tcph->check);
        tcp_hdr_csum = csum_partial(tcph, TCP_HDR_LEN(tcph), tcp_csum);

        tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
                                             len + TCP_HDR_LEN(tcph),
                                             IPPROTO_TCP, 0);

        return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
                        tcp_ps_hdr_csum);
}
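
/*
 * Start a new aggregation session: skb becomes the parent into which
 * further in-order segments of this connection are merged.
 */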
static void lro_init_desc(struct net_lro_desc *lro_desc, struct sk_buff *skb,
                          struct iphdr *iph, struct tcphdr *tcph)
{
        int nr_frags;
        __be32 *ptr;
        u32 tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph);

        nr_frags = skb_shinfo(skb)->nr_frags;
        lro_desc->parent = skb;
        lro_desc->next_frag = &(skb_shinfo(skb)->frags[nr_frags]);
        lro_desc->iph = iph;
        lro_desc->tcph = tcph;
        lro_desc->tcp_next_seq = ntohl(tcph->seq) + tcp_data_len;
        lro_desc->tcp_ack = tcph->ack_seq;
        lro_desc->tcp_window = tcph->window;

        lro_desc->pkt_aggr_cnt = 1;
        lro_desc->ip_tot_len = ntohs(iph->tot_len);

        if (tcph->doff == TCPH_LEN_W_TIMESTAMP) {
                ptr = (__be32 *)(tcph+1);
                lro_desc->tcp_saw_tstamp = 1;
                lro_desc->tcp_rcv_tsval = *(ptr+1);
                lro_desc->tcp_rcv_tsecr = *(ptr+2);
        }

        lro_desc->mss = tcp_data_len;
        lro_desc->active = 1;

        lro_desc->data_csum = lro_tcp_data_csum(iph, tcph,
                                                tcp_data_len);
}

static inline void lro_clear_desc(struct net_lro_desc *lro_desc)
{
        memset(lro_desc, 0, sizeof(struct net_lro_desc));
}
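
/*
 * Bookkeeping shared by both aggregation paths (skb and page fragments):
 * extend the length and sequence state, track the newest ACK/window/tsecr,
 * and fold the new segment's payload checksum into the running total.
 */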
static void lro_add_common(struct net_lro_desc *lro_desc, struct iphdr *iph,
                           struct tcphdr *tcph, int tcp_data_len)
{
        struct sk_buff *parent = lro_desc->parent;
        __be32 *topt;

        lro_desc->pkt_aggr_cnt++;
        lro_desc->ip_tot_len += tcp_data_len;
        lro_desc->tcp_next_seq += tcp_data_len;
        lro_desc->tcp_window = tcph->window;
        lro_desc->tcp_ack = tcph->ack_seq;

        /* don't update tcp_rcv_tsval, would not work with PAWS */
        if (lro_desc->tcp_saw_tstamp) {
                topt = (__be32 *) (tcph + 1);
                lro_desc->tcp_rcv_tsecr = *(topt + 2);
        }

        lro_desc->data_csum = csum_block_add(lro_desc->data_csum,
                                             lro_tcp_data_csum(iph, tcph,
                                                               tcp_data_len),
                                             parent->len);

        parent->len += tcp_data_len;
        parent->data_len += tcp_data_len;
        if (tcp_data_len > lro_desc->mss)
                lro_desc->mss = tcp_data_len;
}
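
/*
 * Aggregate a segment received as an skb: strip its headers and chain
 * it onto the parent skb's frag_list.
 */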
static void lro_add_packet(struct net_lro_desc *lro_desc, struct sk_buff *skb,
                           struct iphdr *iph, struct tcphdr *tcph)
{
        struct sk_buff *parent = lro_desc->parent;
        int tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph);

        lro_add_common(lro_desc, iph, tcph, tcp_data_len);

        skb_pull(skb, (skb->len - tcp_data_len));
        parent->truesize += skb->truesize;

        if (lro_desc->last_skb)
                lro_desc->last_skb->next = skb;
        else
                skb_shinfo(parent)->frag_list = skb;

        lro_desc->last_skb = skb;
}
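
/*
 * Aggregate a segment received as page fragments: skip the header bytes
 * in the first fragment and append the payload fragments to the parent
 * skb's frag array.
 */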
static void lro_add_frags(struct net_lro_desc *lro_desc,
                          int len, int hlen, int truesize,
                          struct skb_frag_struct *skb_frags,
                          struct iphdr *iph, struct tcphdr *tcph)
{
        struct sk_buff *skb = lro_desc->parent;
        int tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph);

        lro_add_common(lro_desc, iph, tcph, tcp_data_len);

        skb->truesize += truesize;

        skb_frags[0].page_offset += hlen;
        skb_frag_size_sub(&skb_frags[0], hlen);

        while (tcp_data_len > 0) {
                *(lro_desc->next_frag) = *skb_frags;
                tcp_data_len -= skb_frag_size(skb_frags);
                lro_desc->next_frag++;
                skb_frags++;
                skb_shinfo(skb)->nr_frags++;
        }
}
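
/*
 * Returns 0 if the packet belongs to the connection tracked by lro_desc
 * (same source/destination address and port), -1 otherwise.
 */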
static int lro_check_tcp_conn(struct net_lro_desc *lro_desc,
                              struct iphdr *iph,
                              struct tcphdr *tcph)
{
        if ((lro_desc->iph->saddr != iph->saddr) ||
            (lro_desc->iph->daddr != iph->daddr) ||
            (lro_desc->tcph->source != tcph->source) ||
            (lro_desc->tcph->dest != tcph->dest))
                return -1;
        return 0;
}
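
/*
 * Find the descriptor of an active session matching this packet's
 * connection; failing that, return a free descriptor for a new session,
 * or NULL (counting a no_desc event) if all descriptors are busy.
 */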
static struct net_lro_desc *lro_get_desc(struct net_lro_mgr *lro_mgr,
                                         struct net_lro_desc *lro_arr,
                                         struct iphdr *iph,
                                         struct tcphdr *tcph)
{
        struct net_lro_desc *lro_desc = NULL;
        struct net_lro_desc *tmp;
        int max_desc = lro_mgr->max_desc;
        int i;

        for (i = 0; i < max_desc; i++) {
                tmp = &lro_arr[i];
                if (tmp->active)
                        if (!lro_check_tcp_conn(tmp, iph, tcph)) {
                                lro_desc = tmp;
                                goto out;
                        }
        }

        for (i = 0; i < max_desc; i++) {
                if (!lro_arr[i].active) {
                        lro_desc = &lro_arr[i];
                        goto out;
                }
        }

        LRO_INC_STATS(lro_mgr, no_desc);
out:
        return lro_desc;
}
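
/*
 * Finalize the aggregate (rewrite headers if more than one packet was
 * merged, set gso_size) and pass it up the stack, then recycle the
 * descriptor.
 */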
static void lro_flush(struct net_lro_mgr *lro_mgr,
                      struct net_lro_desc *lro_desc)
{
        if (lro_desc->pkt_aggr_cnt > 1)
                lro_update_tcp_ip_header(lro_desc);

        skb_shinfo(lro_desc->parent)->gso_size = lro_desc->mss;

        if (lro_mgr->features & LRO_F_NAPI)
                netif_receive_skb(lro_desc->parent);
        else
                netif_rx(lro_desc->parent);

        LRO_INC_STATS(lro_mgr, flushed);
        lro_clear_desc(lro_desc);
}
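
/*
 * Core of the skb receive path. Returns 0 if the skb was consumed
 * (aggregated or used to open a new session) and 1 if the caller must
 * hand the original skb to the stack itself.
 */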
static int __lro_proc_skb(struct net_lro_mgr *lro_mgr, struct sk_buff *skb,
                          void *priv)
{
        struct net_lro_desc *lro_desc;
        struct iphdr *iph;
        struct tcphdr *tcph;
        u64 flags;
        int vlan_hdr_len = 0;

        if (!lro_mgr->get_skb_header ||
            lro_mgr->get_skb_header(skb, (void *)&iph, (void *)&tcph,
                                    &flags, priv))
                goto out;

        if (!(flags & LRO_IPV4) || !(flags & LRO_TCP))
                goto out;

        lro_desc = lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);
        if (!lro_desc)
                goto out;

        if ((skb->protocol == htons(ETH_P_8021Q)) &&
            !(lro_mgr->features & LRO_F_EXTRACT_VLAN_ID))
                vlan_hdr_len = VLAN_HLEN;

        if (!lro_desc->active) { /* start new lro session */
                if (lro_tcp_ip_check(iph, tcph, skb->len - vlan_hdr_len, NULL))
                        goto out;

                skb->ip_summed = lro_mgr->ip_summed_aggr;
                lro_init_desc(lro_desc, skb, iph, tcph);
                LRO_INC_STATS(lro_mgr, aggregated);
                return 0;
        }

        if (lro_desc->tcp_next_seq != ntohl(tcph->seq))
                goto out2;

        if (lro_tcp_ip_check(iph, tcph, skb->len, lro_desc))
                goto out2;

        lro_add_packet(lro_desc, skb, iph, tcph);
        LRO_INC_STATS(lro_mgr, aggregated);

        if ((lro_desc->pkt_aggr_cnt >= lro_mgr->max_aggr) ||
            lro_desc->parent->len > (0xFFFF - lro_mgr->dev->mtu))
                lro_flush(lro_mgr, lro_desc);

        return 0;

out2: /* send aggregated SKBs to stack */
        lro_flush(lro_mgr, lro_desc);

out:
        return 1;
}
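
/*
 * Build an skb around a packet received as page fragments: copy the
 * first hdr_len bytes into the linear area and attach the remaining
 * payload as frags.
 */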
static struct sk_buff *lro_gen_skb(struct net_lro_mgr *lro_mgr,
                                   struct skb_frag_struct *frags,
                                   int len, int true_size,
                                   void *mac_hdr,
                                   int hlen, __wsum sum,
                                   u32 ip_summed)
{
        struct sk_buff *skb;
        struct skb_frag_struct *skb_frags;
        int data_len = len;
        int hdr_len = min(len, hlen);

        skb = netdev_alloc_skb(lro_mgr->dev, hlen + lro_mgr->frag_align_pad);
        if (!skb)
                return NULL;

        skb_reserve(skb, lro_mgr->frag_align_pad);
        skb->len = len;
        skb->data_len = len - hdr_len;
        skb->truesize += true_size;
        skb->tail += hdr_len;

        memcpy(skb->data, mac_hdr, hdr_len);

        skb_frags = skb_shinfo(skb)->frags;
        while (data_len > 0) {
                *skb_frags = *frags;
                data_len -= skb_frag_size(frags);
                skb_frags++;
                frags++;
                skb_shinfo(skb)->nr_frags++;
        }

        skb_shinfo(skb)->frags[0].page_offset += hdr_len;
        skb_frag_size_sub(&skb_shinfo(skb)->frags[0], hdr_len);

        skb->ip_summed = ip_summed;
        skb->csum = sum;
        skb->protocol = eth_type_trans(skb, lro_mgr->dev);
        return skb;
}
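
/*
 * Core of the fragment receive path. Returns NULL if the segment was
 * consumed, or an skb (newly built around the fragments) that the
 * caller must pass to the stack.
 */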
static struct sk_buff *__lro_proc_segment(struct net_lro_mgr *lro_mgr,
                                          struct skb_frag_struct *frags,
                                          int len, int true_size,
                                          void *priv, __wsum sum)
{
        struct net_lro_desc *lro_desc;
        struct iphdr *iph;
        struct tcphdr *tcph;
        struct sk_buff *skb;
        u64 flags;
        void *mac_hdr;
        int mac_hdr_len;
        int hdr_len = LRO_MAX_PG_HLEN;
        int vlan_hdr_len = 0;

        if (!lro_mgr->get_frag_header ||
            lro_mgr->get_frag_header(frags, (void *)&mac_hdr, (void *)&iph,
                                     (void *)&tcph, &flags, priv)) {
                mac_hdr = skb_frag_address(frags);
                goto out1;
        }

        if (!(flags & LRO_IPV4) || !(flags & LRO_TCP))
                goto out1;

        hdr_len = (int)((void *)(tcph) + TCP_HDR_LEN(tcph) - mac_hdr);
        mac_hdr_len = (int)((void *)(iph) - mac_hdr);

        lro_desc = lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);
        if (!lro_desc)
                goto out1;

        if (!lro_desc->active) { /* start new lro session */
                if (lro_tcp_ip_check(iph, tcph, len - mac_hdr_len, NULL))
                        goto out1;

                skb = lro_gen_skb(lro_mgr, frags, len, true_size, mac_hdr,
                                  hdr_len, 0, lro_mgr->ip_summed_aggr);
                if (!skb)
                        goto out;

                if ((skb->protocol == htons(ETH_P_8021Q)) &&
                    !(lro_mgr->features & LRO_F_EXTRACT_VLAN_ID))
                        vlan_hdr_len = VLAN_HLEN;

                iph = (void *)(skb->data + vlan_hdr_len);
                tcph = (void *)((u8 *)skb->data + vlan_hdr_len
                                + IP_HDR_LEN(iph));

                lro_init_desc(lro_desc, skb, iph, tcph);
                LRO_INC_STATS(lro_mgr, aggregated);
                return NULL;
        }

        if (lro_desc->tcp_next_seq != ntohl(tcph->seq))
                goto out2;

        if (lro_tcp_ip_check(iph, tcph, len - mac_hdr_len, lro_desc))
                goto out2;

        lro_add_frags(lro_desc, len, hdr_len, true_size, frags, iph, tcph);
        LRO_INC_STATS(lro_mgr, aggregated);

        if ((skb_shinfo(lro_desc->parent)->nr_frags >= lro_mgr->max_aggr) ||
            lro_desc->parent->len > (0xFFFF - lro_mgr->dev->mtu))
                lro_flush(lro_mgr, lro_desc);

        return NULL;

out2: /* send aggregated packets to the stack */
        lro_flush(lro_mgr, lro_desc);

out1: /* Original packet has to be posted to the stack */
        skb = lro_gen_skb(lro_mgr, frags, len, true_size, mac_hdr,
                          hdr_len, sum, lro_mgr->ip_summed);

out:
        return skb;
}
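
/*
 * Driver entry point for packets received as skbs. Packets that cannot
 * be aggregated are passed to the stack directly.
 */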
void lro_receive_skb(struct net_lro_mgr *lro_mgr,
                     struct sk_buff *skb,
                     void *priv)
{
        if (__lro_proc_skb(lro_mgr, skb, priv)) {
                if (lro_mgr->features & LRO_F_NAPI)
                        netif_receive_skb(skb);
                else
                        netif_rx(skb);
        }
}
EXPORT_SYMBOL(lro_receive_skb);
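
/*
 * Driver entry point for packets received as page fragments. Any skb
 * returned by __lro_proc_segment() could not be aggregated and is
 * passed to the stack here.
 */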
void lro_receive_frags(struct net_lro_mgr *lro_mgr,
                       struct skb_frag_struct *frags,
                       int len, int true_size, void *priv, __wsum sum)
{
        struct sk_buff *skb;

        skb = __lro_proc_segment(lro_mgr, frags, len, true_size, priv, sum);
        if (!skb)
                return;

        if (lro_mgr->features & LRO_F_NAPI)
                netif_receive_skb(skb);
        else
                netif_rx(skb);
}
EXPORT_SYMBOL(lro_receive_frags);
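
/*
 * Flush all active sessions, e.g. at the end of a NAPI poll cycle.
 */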
void lro_flush_all(struct net_lro_mgr *lro_mgr)
{
        int i;
        struct net_lro_desc *lro_desc = lro_mgr->lro_arr;

        for (i = 0; i < lro_mgr->max_desc; i++) {
                if (lro_desc[i].active)
                        lro_flush(lro_mgr, &lro_desc[i]);
        }
}
EXPORT_SYMBOL(lro_flush_all);
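
/*
 * Flush the session this packet's connection belongs to, if one is
 * active.
 */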
void lro_flush_pkt(struct net_lro_mgr *lro_mgr,
                   struct iphdr *iph, struct tcphdr *tcph)
{
        struct net_lro_desc *lro_desc;

        lro_desc = lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);
        /* lro_get_desc() returns NULL when all descriptors are busy */
        if (lro_desc && lro_desc->active)
                lro_flush(lro_mgr, lro_desc);
}
EXPORT_SYMBOL(lro_flush_pkt);
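
/*
 * Minimal usage sketch (illustrative only, not part of this file): how a
 * NIC driver might wire up this API on its skb receive path. The header
 * parser, descriptor count, and poll-loop shape below are assumptions
 * made for the example, not requirements imposed by inet_lro.
 *
 *      #define MY_LRO_MAX_DESC 8    // hypothetical driver constant
 *
 *      static int my_get_skb_header(struct sk_buff *skb, void **iphdr,
 *                                   void **tcph, u64 *hdr_flags, void *priv)
 *      {
 *              struct iphdr *iph;
 *
 *              skb_reset_network_header(skb);
 *              iph = ip_hdr(skb);
 *              if (iph->protocol != IPPROTO_TCP)
 *                      return -1;
 *
 *              *iphdr = iph;
 *              *tcph = (u8 *)iph + (iph->ihl << 2);
 *              *hdr_flags = LRO_IPV4 | LRO_TCP;
 *              return 0;
 *      }
 *
 *      // once, in the driver's setup path:
 *      mgr->dev = netdev;
 *      mgr->features = LRO_F_NAPI;
 *      mgr->ip_summed = CHECKSUM_UNNECESSARY;
 *      mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
 *      mgr->max_desc = MY_LRO_MAX_DESC;
 *      mgr->max_aggr = 32;
 *      mgr->lro_arr = my_desc_array;   // MY_LRO_MAX_DESC zeroed entries
 *      mgr->get_skb_header = my_get_skb_header;
 *
 *      // per received packet, inside the NAPI poll loop:
 *      lro_receive_skb(mgr, skb, NULL);
 *
 *      // once the poll loop is done:
 *      lro_flush_all(mgr);
 */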