inet_lro.c
  1. /*
  2. * linux/net/ipv4/inet_lro.c
  3. *
  4. * Large Receive Offload (ipv4 / tcp)
  5. *
  6. * (C) Copyright IBM Corp. 2007
  7. *
  8. * Authors:
  9. * Jan-Bernd Themann <themann@de.ibm.com>
  10. * Christoph Raisch <raisch@de.ibm.com>
  11. *
  12. *
  13. * This program is free software; you can redistribute it and/or modify
  14. * it under the terms of the GNU General Public License as published by
  15. * the Free Software Foundation; either version 2, or (at your option)
  16. * any later version.
  17. *
  18. * This program is distributed in the hope that it will be useful,
  19. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  20. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  21. * GNU General Public License for more details.
  22. *
  23. * You should have received a copy of the GNU General Public License
  24. * along with this program; if not, write to the Free Software
  25. * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  26. */
  27. #include <linux/module.h>
  28. #include <linux/if_vlan.h>
  29. #include <linux/inet_lro.h>
  30. MODULE_LICENSE("GPL");
  31. MODULE_AUTHOR("Jan-Bernd Themann <themann@de.ibm.com>");
  32. MODULE_DESCRIPTION("Large Receive Offload (ipv4 / tcp)");
  33. #define TCP_HDR_LEN(tcph) (tcph->doff << 2)
  34. #define IP_HDR_LEN(iph) (iph->ihl << 2)
  35. #define TCP_PAYLOAD_LENGTH(iph, tcph) \
  36. (ntohs(iph->tot_len) - IP_HDR_LEN(iph) - TCP_HDR_LEN(tcph))
  37. #define IPH_LEN_WO_OPTIONS 5
  38. #define TCPH_LEN_WO_OPTIONS 5
  39. #define TCPH_LEN_W_TIMESTAMP 8
  40. #define LRO_MAX_PG_HLEN 64
  41. #define LRO_INC_STATS(lro_mgr, attr) { lro_mgr->stats.attr++; }
/*
 * Basic tcp checks whether packet is suitable for LRO.
 *
 * @len is the expected IP datagram length (headers + payload);
 * @lro_desc may be NULL when checking the first packet of a session,
 * otherwise it supplies the previous timestamp for the ordering check.
 *
 * Returns 0 when the packet may be aggregated, -1 otherwise.
 */
static int lro_tcp_ip_check(const struct iphdr *iph, const struct tcphdr *tcph,
			    int len, const struct net_lro_desc *lro_desc)
{
	/* check ip header: don't aggregate padded frames */
	if (ntohs(iph->tot_len) != len)
		return -1;

	/* pure ACKs carry no payload and are not worth merging */
	if (TCP_PAYLOAD_LENGTH(iph, tcph) == 0)
		return -1;

	/* IP options would need per-packet handling: refuse them */
	if (iph->ihl != IPH_LEN_WO_OPTIONS)
		return -1;

	/* only plain in-order data segments: any control flag bails out */
	if (tcph->cwr || tcph->ece || tcph->urg || !tcph->ack ||
	    tcph->rst || tcph->syn || tcph->fin)
		return -1;

	/* congestion-marked packets must reach the stack individually */
	if (INET_ECN_is_ce(ipv4_get_dsfield(iph)))
		return -1;

	if (tcph->doff != TCPH_LEN_WO_OPTIONS &&
	    tcph->doff != TCPH_LEN_W_TIMESTAMP)
		return -1;

	/* check tcp options (only timestamp allowed) */
	if (tcph->doff == TCPH_LEN_W_TIMESTAMP) {
		/* expect the canonical NOP,NOP,TIMESTAMP,len layout */
		__be32 *topt = (__be32 *)(tcph + 1);

		if (*topt != htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
				   | (TCPOPT_TIMESTAMP << 8)
				   | TCPOLEN_TIMESTAMP))
			return -1;

		/* timestamp should be in right order */
		topt++;
		if (lro_desc && after(ntohl(lro_desc->tcp_rcv_tsval),
				      ntohl(*topt)))
			return -1;

		/* timestamp reply should not be zero */
		topt++;
		if (*topt == 0)
			return -1;
	}

	return 0;
}
/*
 * Rewrite the aggregated packet's IP/TCP headers before it is handed to
 * the stack: patch in the latest ACK/window values, the final total
 * length, and recompute both checksums.
 */
static void lro_update_tcp_ip_header(struct net_lro_desc *lro_desc)
{
	struct iphdr *iph = lro_desc->iph;
	struct tcphdr *tcph = lro_desc->tcph;
	__be32 *p;
	__wsum tcp_hdr_csum;

	/* most recent ACK number and receive window win */
	tcph->ack_seq = lro_desc->tcp_ack;
	tcph->window = lro_desc->tcp_window;

	if (lro_desc->tcp_saw_tstamp) {
		/* timestamp option follows the TCP header; word 2 is tsecr */
		p = (__be32 *)(tcph + 1);
		*(p+2) = lro_desc->tcp_rcv_tsecr;
	}

	/* IP length now spans all merged segments; redo the header csum */
	iph->tot_len = htons(lro_desc->ip_tot_len);

	iph->check = 0;
	iph->check = ip_fast_csum((u8 *)lro_desc->iph, iph->ihl);

	/*
	 * data_csum covers the payload only; fold in the (rewritten) TCP
	 * header, then finish with the pseudo-header sum.
	 */
	tcph->check = 0;
	tcp_hdr_csum = csum_partial(tcph, TCP_HDR_LEN(tcph), 0);
	lro_desc->data_csum = csum_add(lro_desc->data_csum, tcp_hdr_csum);
	tcph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
					lro_desc->ip_tot_len -
					IP_HDR_LEN(iph), IPPROTO_TCP,
					lro_desc->data_csum);
}
/*
 * Derive a payload-only checksum for a segment from its own tcph->check
 * field: undo the TCP header and pseudo-header contributions, leaving
 * the sum over just the @len payload bytes.
 */
static __wsum lro_tcp_data_csum(struct iphdr *iph, struct tcphdr *tcph, int len)
{
	__wsum tcp_csum;
	__wsum tcp_hdr_csum;
	__wsum tcp_ps_hdr_csum;

	/* tcph->check is the folded complement of the full sum */
	tcp_csum = ~csum_unfold(tcph->check);

	/* header bytes folded on top of the full sum */
	tcp_hdr_csum = csum_partial(tcph, TCP_HDR_LEN(tcph), tcp_csum);

	/* pseudo-header over saddr/daddr/proto and header+payload length */
	tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
					     len + TCP_HDR_LEN(tcph),
					     IPPROTO_TCP, 0);

	return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
			tcp_ps_hdr_csum);
}
/*
 * Set up a free descriptor for a new LRO session headed by @skb.
 * Header pointers, the expected next sequence number, timestamp state
 * and the running payload checksum are all seeded from this first packet.
 */
static void lro_init_desc(struct net_lro_desc *lro_desc, struct sk_buff *skb,
			  struct iphdr *iph, struct tcphdr *tcph,
			  u16 vlan_tag, struct vlan_group *vgrp)
{
	int nr_frags;
	__be32 *ptr;
	u32 tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph);

	nr_frags = skb_shinfo(skb)->nr_frags;
	lro_desc->parent = skb;
	/* where lro_add_frags() will append the next page fragment */
	lro_desc->next_frag = &(skb_shinfo(skb)->frags[nr_frags]);
	lro_desc->iph = iph;
	lro_desc->tcph = tcph;
	lro_desc->tcp_next_seq = ntohl(tcph->seq) + tcp_data_len;
	lro_desc->tcp_ack = tcph->ack_seq;
	lro_desc->tcp_window = tcph->window;

	lro_desc->pkt_aggr_cnt = 1;
	lro_desc->ip_tot_len = ntohs(iph->tot_len);

	/* 8 == TCPH_LEN_W_TIMESTAMP: header carries the timestamp option */
	if (tcph->doff == 8) {
		ptr = (__be32 *)(tcph+1);
		lro_desc->tcp_saw_tstamp = 1;
		/* words 1 and 2 after the header are tsval and tsecr */
		lro_desc->tcp_rcv_tsval = *(ptr+1);
		lro_desc->tcp_rcv_tsecr = *(ptr+2);
	}

	/* first segment's payload size seeds the reported gso_size */
	lro_desc->mss = tcp_data_len;
	lro_desc->vgrp = vgrp;
	lro_desc->vlan_tag = vlan_tag;
	lro_desc->active = 1;

	lro_desc->data_csum = lro_tcp_data_csum(iph, tcph,
						tcp_data_len);
}
  148. static inline void lro_clear_desc(struct net_lro_desc *lro_desc)
  149. {
  150. memset(lro_desc, 0, sizeof(struct net_lro_desc));
  151. }
/*
 * Bookkeeping shared by both aggregation paths: extend the descriptor's
 * totals by @tcp_data_len and fold the new segment's payload checksum
 * into the running sum.
 */
static void lro_add_common(struct net_lro_desc *lro_desc, struct iphdr *iph,
			   struct tcphdr *tcph, int tcp_data_len)
{
	struct sk_buff *parent = lro_desc->parent;
	__be32 *topt;

	lro_desc->pkt_aggr_cnt++;
	lro_desc->ip_tot_len += tcp_data_len;
	lro_desc->tcp_next_seq += tcp_data_len;
	lro_desc->tcp_window = tcph->window;
	lro_desc->tcp_ack = tcph->ack_seq;

	/* don't update tcp_rcv_tsval, would not work with PAWS */
	if (lro_desc->tcp_saw_tstamp) {
		topt = (__be32 *) (tcph + 1);
		lro_desc->tcp_rcv_tsecr = *(topt + 2);
	}

	/* offset-aware add: the new payload starts at byte parent->len */
	lro_desc->data_csum = csum_block_add(lro_desc->data_csum,
					     lro_tcp_data_csum(iph, tcph,
							       tcp_data_len),
					     parent->len);

	parent->len += tcp_data_len;
	parent->data_len += tcp_data_len;

	/* report the largest single segment seen as the gso_size */
	if (tcp_data_len > lro_desc->mss)
		lro_desc->mss = tcp_data_len;
}
/*
 * Aggregate a fully built skb: strip everything but the TCP payload and
 * chain it onto the parent skb's frag_list.
 */
static void lro_add_packet(struct net_lro_desc *lro_desc, struct sk_buff *skb,
			   struct iphdr *iph, struct tcphdr *tcph)
{
	struct sk_buff *parent = lro_desc->parent;
	int tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph);

	lro_add_common(lro_desc, iph, tcph, tcp_data_len);

	/* drop the headers so only the payload is linked in */
	skb_pull(skb, (skb->len - tcp_data_len));
	parent->truesize += skb->truesize;

	/* append to the frag_list, remembering the tail for O(1) adds */
	if (lro_desc->last_skb)
		lro_desc->last_skb->next = skb;
	else
		skb_shinfo(parent)->frag_list = skb;

	lro_desc->last_skb = skb;
}
/*
 * Aggregate a packet delivered as page fragments: advance the first
 * fragment past the @hlen header bytes and copy the payload fragments
 * into the parent skb's fragment array.
 *
 * NOTE(review): assumes @skb_frags covers at least tcp_data_len bytes
 * and that the parent has room for the extra frags — presumably
 * guaranteed by the max_aggr flush threshold in __lro_proc_segment();
 * confirm against the drivers using this API.
 */
static void lro_add_frags(struct net_lro_desc *lro_desc,
			  int len, int hlen, int truesize,
			  struct skb_frag_struct *skb_frags,
			  struct iphdr *iph, struct tcphdr *tcph)
{
	struct sk_buff *skb = lro_desc->parent;
	int tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph);

	lro_add_common(lro_desc, iph, tcph, tcp_data_len);

	skb->truesize += truesize;

	/* first frag: skip over the MAC/IP/TCP headers */
	skb_frags[0].page_offset += hlen;
	skb_frags[0].size -= hlen;

	/* copy frags until the payload is accounted for */
	while (tcp_data_len > 0) {
		*(lro_desc->next_frag) = *skb_frags;
		tcp_data_len -= skb_frags->size;
		lro_desc->next_frag++;
		skb_frags++;
		skb_shinfo(skb)->nr_frags++;
	}
}
  209. static int lro_check_tcp_conn(struct net_lro_desc *lro_desc,
  210. struct iphdr *iph,
  211. struct tcphdr *tcph)
  212. {
  213. if ((lro_desc->iph->saddr != iph->saddr) ||
  214. (lro_desc->iph->daddr != iph->daddr) ||
  215. (lro_desc->tcph->source != tcph->source) ||
  216. (lro_desc->tcph->dest != tcph->dest))
  217. return -1;
  218. return 0;
  219. }
  220. static struct net_lro_desc *lro_get_desc(struct net_lro_mgr *lro_mgr,
  221. struct net_lro_desc *lro_arr,
  222. struct iphdr *iph,
  223. struct tcphdr *tcph)
  224. {
  225. struct net_lro_desc *lro_desc = NULL;
  226. struct net_lro_desc *tmp;
  227. int max_desc = lro_mgr->max_desc;
  228. int i;
  229. for (i = 0; i < max_desc; i++) {
  230. tmp = &lro_arr[i];
  231. if (tmp->active)
  232. if (!lro_check_tcp_conn(tmp, iph, tcph)) {
  233. lro_desc = tmp;
  234. goto out;
  235. }
  236. }
  237. for (i = 0; i < max_desc; i++) {
  238. if (!lro_arr[i].active) {
  239. lro_desc = &lro_arr[i];
  240. goto out;
  241. }
  242. }
  243. LRO_INC_STATS(lro_mgr, no_desc);
  244. out:
  245. return lro_desc;
  246. }
/*
 * Finalize a session: fix up the merged headers if needed, hand the
 * aggregated skb to the stack (via the VLAN path when a group was
 * recorded at init time), and recycle the descriptor.
 */
static void lro_flush(struct net_lro_mgr *lro_mgr,
		      struct net_lro_desc *lro_desc)
{
	/* headers only need rewriting if more than one packet was merged */
	if (lro_desc->pkt_aggr_cnt > 1)
		lro_update_tcp_ip_header(lro_desc);

	skb_shinfo(lro_desc->parent)->gso_size = lro_desc->mss;

	if (lro_desc->vgrp) {
		/* LRO_F_NAPI selects the polled receive path */
		if (lro_mgr->features & LRO_F_NAPI)
			vlan_hwaccel_receive_skb(lro_desc->parent,
						 lro_desc->vgrp,
						 lro_desc->vlan_tag);
		else
			vlan_hwaccel_rx(lro_desc->parent,
					lro_desc->vgrp,
					lro_desc->vlan_tag);

	} else {
		if (lro_mgr->features & LRO_F_NAPI)
			netif_receive_skb(lro_desc->parent);
		else
			netif_rx(lro_desc->parent);
	}

	LRO_INC_STATS(lro_mgr, flushed);
	lro_clear_desc(lro_desc);
}
  271. static int __lro_proc_skb(struct net_lro_mgr *lro_mgr, struct sk_buff *skb,
  272. struct vlan_group *vgrp, u16 vlan_tag, void *priv)
  273. {
  274. struct net_lro_desc *lro_desc;
  275. struct iphdr *iph;
  276. struct tcphdr *tcph;
  277. u64 flags;
  278. int vlan_hdr_len = 0;
  279. if (!lro_mgr->get_skb_header ||
  280. lro_mgr->get_skb_header(skb, (void *)&iph, (void *)&tcph,
  281. &flags, priv))
  282. goto out;
  283. if (!(flags & LRO_IPV4) || !(flags & LRO_TCP))
  284. goto out;
  285. lro_desc = lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);
  286. if (!lro_desc)
  287. goto out;
  288. if ((skb->protocol == htons(ETH_P_8021Q)) &&
  289. !(lro_mgr->features & LRO_F_EXTRACT_VLAN_ID))
  290. vlan_hdr_len = VLAN_HLEN;
  291. if (!lro_desc->active) { /* start new lro session */
  292. if (lro_tcp_ip_check(iph, tcph, skb->len - vlan_hdr_len, NULL))
  293. goto out;
  294. skb->ip_summed = lro_mgr->ip_summed_aggr;
  295. lro_init_desc(lro_desc, skb, iph, tcph, vlan_tag, vgrp);
  296. LRO_INC_STATS(lro_mgr, aggregated);
  297. return 0;
  298. }
  299. if (lro_desc->tcp_next_seq != ntohl(tcph->seq))
  300. goto out2;
  301. if (lro_tcp_ip_check(iph, tcph, skb->len, lro_desc))
  302. goto out2;
  303. lro_add_packet(lro_desc, skb, iph, tcph);
  304. LRO_INC_STATS(lro_mgr, aggregated);
  305. if ((lro_desc->pkt_aggr_cnt >= lro_mgr->max_aggr) ||
  306. lro_desc->parent->len > (0xFFFF - lro_mgr->dev->mtu))
  307. lro_flush(lro_mgr, lro_desc);
  308. return 0;
  309. out2: /* send aggregated SKBs to stack */
  310. lro_flush(lro_mgr, lro_desc);
  311. out:
  312. return 1;
  313. }
/*
 * Build an skb around a received fragment chain: copy the first
 * min(len, hlen) bytes (the headers) into the linear area and attach
 * the remainder as page fragments.  Returns NULL on allocation failure.
 */
static struct sk_buff *lro_gen_skb(struct net_lro_mgr *lro_mgr,
				   struct skb_frag_struct *frags,
				   int len, int true_size,
				   void *mac_hdr,
				   int hlen, __wsum sum,
				   u32 ip_summed)
{
	struct sk_buff *skb;
	struct skb_frag_struct *skb_frags;
	int data_len = len;
	int hdr_len = min(len, hlen);

	skb = netdev_alloc_skb(lro_mgr->dev, hlen + lro_mgr->frag_align_pad);
	if (!skb)
		return NULL;

	skb_reserve(skb, lro_mgr->frag_align_pad);
	skb->len = len;
	skb->data_len = len - hdr_len;
	skb->truesize += true_size;
	/* advance tail by hand to account for the header copy below */
	skb->tail += hdr_len;

	/* linear part holds the copied headers */
	memcpy(skb->data, mac_hdr, hdr_len);

	skb_frags = skb_shinfo(skb)->frags;
	while (data_len > 0) {
		*skb_frags = *frags;
		data_len -= frags->size;
		skb_frags++;
		frags++;
		skb_shinfo(skb)->nr_frags++;
	}

	/* first frag: skip the bytes already copied into the linear area */
	skb_shinfo(skb)->frags[0].page_offset += hdr_len;
	skb_shinfo(skb)->frags[0].size -= hdr_len;

	skb->ip_summed = ip_summed;
	skb->csum = sum;
	skb->protocol = eth_type_trans(skb, lro_mgr->dev);
	return skb;
}
/*
 * Fragment-based counterpart of __lro_proc_skb().  Returns NULL when
 * the data was aggregated (or now heads a new session); otherwise
 * returns a freshly built skb that the caller must pass to the stack
 * (also NULL if that skb allocation fails — see the "out" label).
 */
static struct sk_buff *__lro_proc_segment(struct net_lro_mgr *lro_mgr,
					  struct skb_frag_struct *frags,
					  int len, int true_size,
					  struct vlan_group *vgrp,
					  u16 vlan_tag, void *priv, __wsum sum)
{
	struct net_lro_desc *lro_desc;
	struct iphdr *iph;
	struct tcphdr *tcph;
	struct sk_buff *skb;
	u64 flags;
	void *mac_hdr;
	int mac_hdr_len;
	int hdr_len = LRO_MAX_PG_HLEN;
	int vlan_hdr_len = 0;

	/* driver callback locates MAC/IP/TCP headers inside the first frag */
	if (!lro_mgr->get_frag_header ||
	    lro_mgr->get_frag_header(frags, (void *)&mac_hdr, (void *)&iph,
				     (void *)&tcph, &flags, priv)) {
		/* parse failed: fall back to the frag page start */
		mac_hdr = page_address(frags->page) + frags->page_offset;
		goto out1;
	}

	if (!(flags & LRO_IPV4) || !(flags & LRO_TCP))
		goto out1;

	/* total header bytes = end of TCP header minus start of MAC header */
	hdr_len = (int)((void *)(tcph) + TCP_HDR_LEN(tcph) - mac_hdr);
	mac_hdr_len = (int)((void *)(iph) - mac_hdr);

	lro_desc = lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);
	if (!lro_desc)
		goto out1;

	if (!lro_desc->active) { /* start new lro session */
		if (lro_tcp_ip_check(iph, tcph, len - mac_hdr_len, NULL))
			goto out1;

		skb = lro_gen_skb(lro_mgr, frags, len, true_size, mac_hdr,
				  hdr_len, 0, lro_mgr->ip_summed_aggr);
		if (!skb)
			goto out;

		if ((skb->protocol == htons(ETH_P_8021Q)) &&
		    !(lro_mgr->features & LRO_F_EXTRACT_VLAN_ID))
			vlan_hdr_len = VLAN_HLEN;

		/* re-derive header pointers inside the new skb's own data */
		iph = (void *)(skb->data + vlan_hdr_len);
		tcph = (void *)((u8 *)skb->data + vlan_hdr_len
				+ IP_HDR_LEN(iph));

		lro_init_desc(lro_desc, skb, iph, tcph, 0, NULL);
		LRO_INC_STATS(lro_mgr, aggregated);
		return NULL;
	}

	/* out-of-order segment terminates the session */
	if (lro_desc->tcp_next_seq != ntohl(tcph->seq))
		goto out2;

	if (lro_tcp_ip_check(iph, tcph, len - mac_hdr_len, lro_desc))
		goto out2;

	lro_add_frags(lro_desc, len, hdr_len, true_size, frags, iph, tcph);
	LRO_INC_STATS(lro_mgr, aggregated);

	/* flush on frag-count limit or imminent IP length overflow */
	if ((skb_shinfo(lro_desc->parent)->nr_frags >= lro_mgr->max_aggr) ||
	    lro_desc->parent->len > (0xFFFF - lro_mgr->dev->mtu))
		lro_flush(lro_mgr, lro_desc);

	return NULL;

out2: /* send aggregated packets to the stack */
	lro_flush(lro_mgr, lro_desc);

out1: /* Original packet has to be posted to the stack */
	skb = lro_gen_skb(lro_mgr, frags, len, true_size, mac_hdr,
			  hdr_len, sum, lro_mgr->ip_summed);

out:
	return skb;
}
  412. void lro_receive_skb(struct net_lro_mgr *lro_mgr,
  413. struct sk_buff *skb,
  414. void *priv)
  415. {
  416. if (__lro_proc_skb(lro_mgr, skb, NULL, 0, priv)) {
  417. if (lro_mgr->features & LRO_F_NAPI)
  418. netif_receive_skb(skb);
  419. else
  420. netif_rx(skb);
  421. }
  422. }
  423. EXPORT_SYMBOL(lro_receive_skb);
  424. void lro_vlan_hwaccel_receive_skb(struct net_lro_mgr *lro_mgr,
  425. struct sk_buff *skb,
  426. struct vlan_group *vgrp,
  427. u16 vlan_tag,
  428. void *priv)
  429. {
  430. if (__lro_proc_skb(lro_mgr, skb, vgrp, vlan_tag, priv)) {
  431. if (lro_mgr->features & LRO_F_NAPI)
  432. vlan_hwaccel_receive_skb(skb, vgrp, vlan_tag);
  433. else
  434. vlan_hwaccel_rx(skb, vgrp, vlan_tag);
  435. }
  436. }
  437. EXPORT_SYMBOL(lro_vlan_hwaccel_receive_skb);
  438. void lro_receive_frags(struct net_lro_mgr *lro_mgr,
  439. struct skb_frag_struct *frags,
  440. int len, int true_size, void *priv, __wsum sum)
  441. {
  442. struct sk_buff *skb;
  443. skb = __lro_proc_segment(lro_mgr, frags, len, true_size, NULL, 0,
  444. priv, sum);
  445. if (!skb)
  446. return;
  447. if (lro_mgr->features & LRO_F_NAPI)
  448. netif_receive_skb(skb);
  449. else
  450. netif_rx(skb);
  451. }
  452. EXPORT_SYMBOL(lro_receive_frags);
  453. void lro_vlan_hwaccel_receive_frags(struct net_lro_mgr *lro_mgr,
  454. struct skb_frag_struct *frags,
  455. int len, int true_size,
  456. struct vlan_group *vgrp,
  457. u16 vlan_tag, void *priv, __wsum sum)
  458. {
  459. struct sk_buff *skb;
  460. skb = __lro_proc_segment(lro_mgr, frags, len, true_size, vgrp,
  461. vlan_tag, priv, sum);
  462. if (!skb)
  463. return;
  464. if (lro_mgr->features & LRO_F_NAPI)
  465. vlan_hwaccel_receive_skb(skb, vgrp, vlan_tag);
  466. else
  467. vlan_hwaccel_rx(skb, vgrp, vlan_tag);
  468. }
  469. EXPORT_SYMBOL(lro_vlan_hwaccel_receive_frags);
  470. void lro_flush_all(struct net_lro_mgr *lro_mgr)
  471. {
  472. int i;
  473. struct net_lro_desc *lro_desc = lro_mgr->lro_arr;
  474. for (i = 0; i < lro_mgr->max_desc; i++) {
  475. if (lro_desc[i].active)
  476. lro_flush(lro_mgr, &lro_desc[i]);
  477. }
  478. }
  479. EXPORT_SYMBOL(lro_flush_all);
  480. void lro_flush_pkt(struct net_lro_mgr *lro_mgr,
  481. struct iphdr *iph, struct tcphdr *tcph)
  482. {
  483. struct net_lro_desc *lro_desc;
  484. lro_desc = lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);
  485. if (lro_desc->active)
  486. lro_flush(lro_mgr, lro_desc);
  487. }
  488. EXPORT_SYMBOL(lro_flush_pkt);