u_ether.c

  1. /*
  2. * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
  3. *
  4. * Copyright (C) 2003-2005,2008 David Brownell
  5. * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
  6. * Copyright (C) 2008 Nokia Corporation
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation; either version 2 of the License, or
  11. * (at your option) any later version.
  12. */
  13. /* #define VERBOSE_DEBUG */
  14. #include <linux/kernel.h>
  15. #include <linux/gfp.h>
  16. #include <linux/device.h>
  17. #include <linux/ctype.h>
  18. #include <linux/etherdevice.h>
  19. #include <linux/ethtool.h>
  20. #include "u_ether.h"
  21. /*
  22. * This component encapsulates the Ethernet link glue needed to provide
  23. * one (!) network link through the USB gadget stack, normally "usb0".
  24. *
  25. * The control and data models are handled by the function driver which
  26. * connects to this code; such as CDC Ethernet (ECM or EEM),
  27. * "CDC Subset", or RNDIS. That includes all descriptor and endpoint
  28. * management.
  29. *
  30. * Link level addressing is handled by this component using module
  31. * parameters; if no such parameters are provided, random link level
  32. * addresses are used. Each end of the link uses one address. The
  33. * host end address is exported in various ways, and is often recorded
  34. * in configuration databases.
  35. *
  36. * The driver which assembles each configuration using such a link is
  37. * responsible for ensuring that each configuration includes at most one
  38. * instance of this network link. (The network layer provides ways for
  39. * this single "physical" link to be used by multiple virtual links.)
  40. */
  41. #define UETH__VERSION "29-May-2008"
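/* workqueue that runs process_rx_w(): it hands completed RX frames to
 * the network stack and refills the RX queue outside the USB completion
 * (interrupt) path
 */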
  42. static struct workqueue_struct *uether_wq;
  43. struct eth_dev {
  44. /* lock is held while accessing port_usb
  45. * or updating its backlink port_usb->ioport
  46. */
  47. spinlock_t lock;
  48. struct gether *port_usb;
  49. struct net_device *net;
  50. struct usb_gadget *gadget;
  51. spinlock_t req_lock; /* guard {rx,tx}_reqs */
  52. struct list_head tx_reqs, rx_reqs;
  53. unsigned tx_qlen;
  54. /* Minimum number of TX USB requests queued to the UDC */
  55. #define TX_REQ_THRESHOLD 5
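/* state for the multi-packet (aggregation) TX path: requests currently
 * queued to the UDC, frames copied into the request being filled, and
 * the size of each preallocated aggregation buffer
 */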
  56. int no_tx_req_used;
  57. int tx_skb_hold_count;
  58. u32 tx_req_bufsize;
  59. struct sk_buff_head rx_frames;
  60. unsigned header_len;
  61. unsigned int ul_max_pkts_per_xfer;
  62. unsigned int dl_max_pkts_per_xfer;
  63. struct sk_buff *(*wrap)(struct gether *, struct sk_buff *skb);
  64. int (*unwrap)(struct gether *,
  65. struct sk_buff *skb,
  66. struct sk_buff_head *list);
  67. struct work_struct work;
  68. struct work_struct rx_work;
  69. unsigned long todo;
  70. #define WORK_RX_MEMORY 0
  71. bool zlp;
  72. u8 host_mac[ETH_ALEN];
  73. };
  74. /*-------------------------------------------------------------------------*/
  75. #define RX_EXTRA 20 /* bytes guarding against rx overflows */
  76. #define DEFAULT_QLEN 2 /* double buffering by default */
  77. static unsigned qmult = 10;
  78. module_param(qmult, uint, S_IRUGO|S_IWUSR);
  79. MODULE_PARM_DESC(qmult, "queue length multiplier at high/super speed");
  80. /* for dual-speed hardware, use deeper queues at high/super speed */
  81. static inline int qlen(struct usb_gadget *gadget)
  82. {
  83. if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
  84. gadget->speed == USB_SPEED_SUPER))
  85. return qmult * DEFAULT_QLEN;
  86. else
  87. return DEFAULT_QLEN;
  88. }
  89. /*-------------------------------------------------------------------------*/
  90. /* REVISIT there must be a better way than having two sets
  91. * of debug calls ...
  92. */
  93. #undef DBG
  94. #undef VDBG
  95. #undef ERROR
  96. #undef INFO
  97. #define xprintk(d, level, fmt, args...) \
  98. printk(level "%s: " fmt , (d)->net->name , ## args)
  99. #ifdef DEBUG
  100. #undef DEBUG
  101. #define DBG(dev, fmt, args...) \
  102. xprintk(dev , KERN_DEBUG , fmt , ## args)
  103. #else
  104. #define DBG(dev, fmt, args...) \
  105. do { } while (0)
  106. #endif /* DEBUG */
  107. #ifdef VERBOSE_DEBUG
  108. #define VDBG DBG
  109. #else
  110. #define VDBG(dev, fmt, args...) \
  111. do { } while (0)
  112. #endif /* VERBOSE_DEBUG */
  113. #define ERROR(dev, fmt, args...) \
  114. xprintk(dev , KERN_ERR , fmt , ## args)
  115. #define INFO(dev, fmt, args...) \
  116. xprintk(dev , KERN_INFO , fmt , ## args)
  117. /*-------------------------------------------------------------------------*/
  118. /* NETWORK DRIVER HOOKUP (to the layer above this driver) */
  119. static int ueth_change_mtu(struct net_device *net, int new_mtu)
  120. {
  121. struct eth_dev *dev = netdev_priv(net);
  122. unsigned long flags;
  123. int status = 0;
  124. /* don't change MTU on "live" link (peer won't know) */
  125. spin_lock_irqsave(&dev->lock, flags);
  126. if (dev->port_usb)
  127. status = -EBUSY;
  128. else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
  129. status = -ERANGE;
  130. else
  131. net->mtu = new_mtu;
  132. spin_unlock_irqrestore(&dev->lock, flags);
  133. return status;
  134. }
  135. static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
  136. {
  137. struct eth_dev *dev = netdev_priv(net);
  138. strlcpy(p->driver, "g_ether", sizeof p->driver);
  139. strlcpy(p->version, UETH__VERSION, sizeof p->version);
  140. strlcpy(p->fw_version, dev->gadget->name, sizeof p->fw_version);
  141. strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info);
  142. }
  143. /* REVISIT can also support:
  144. * - WOL (by tracking suspends and issuing remote wakeup)
  145. * - msglevel (implies updated messaging)
  146. * - ... probably more ethtool ops
  147. */
  148. static const struct ethtool_ops ops = {
  149. .get_drvinfo = eth_get_drvinfo,
  150. .get_link = ethtool_op_get_link,
  151. };
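/* schedule eth_work from atomic context; the test_and_set_bit() on
 * dev->todo keeps an already-pending event from being queued twice
 */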
  152. static void defer_kevent(struct eth_dev *dev, int flag)
  153. {
  154. if (test_and_set_bit(flag, &dev->todo))
  155. return;
  156. if (!schedule_work(&dev->work))
  157. ERROR(dev, "kevent %d may have been dropped\n", flag);
  158. else
  159. DBG(dev, "kevent %d scheduled\n", flag);
  160. }
  161. static void rx_complete(struct usb_ep *ep, struct usb_request *req);
  162. static void tx_complete(struct usb_ep *ep, struct usb_request *req);
  163. static int
  164. rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
  165. {
  166. struct sk_buff *skb;
  167. int retval = -ENOMEM;
  168. size_t size = 0;
  169. struct usb_ep *out;
  170. unsigned long flags;
  171. spin_lock_irqsave(&dev->lock, flags);
  172. if (dev->port_usb)
  173. out = dev->port_usb->out_ep;
  174. else
  175. out = NULL;
  176. spin_unlock_irqrestore(&dev->lock, flags);
  177. if (!out)
  178. return -ENOTCONN;
  179. /* Padding up to RX_EXTRA handles minor disagreements with host.
  180. * Normally we use the USB "terminate on short read" convention;
  181. * so allow up to (N*maxpacket), since that memory is normally
  182. * already allocated. Some hardware doesn't deal well with short
  183. * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
  184. * byte off the end (to force hardware errors on overflow).
  185. *
  186. * RNDIS uses internal framing, and explicitly allows senders to
  187. * pad to end-of-packet. That's potentially nice for speed, but
  188. * means receivers can't recover lost synch on their own (because
  189. * new packets don't only start after a short RX).
  190. */
  191. size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
  192. size += dev->port_usb->header_len;
  193. size += out->maxpacket - 1;
  194. size -= size % out->maxpacket;
  195. if (dev->ul_max_pkts_per_xfer)
  196. size *= dev->ul_max_pkts_per_xfer;
  197. if (dev->port_usb->is_fixed)
  198. size = max_t(size_t, size, dev->port_usb->fixed_out_len);
  199. pr_debug("%s: size: %zu\n", __func__, size);
  200. skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
  201. if (skb == NULL) {
  202. DBG(dev, "no rx skb\n");
  203. goto enomem;
  204. }
  205. /* Some platforms perform better when IP packets are aligned,
  206. * but on at least one, checksumming fails otherwise. Note:
  207. * RNDIS headers involve variable numbers of LE32 values.
  208. */
  209. skb_reserve(skb, NET_IP_ALIGN);
  210. req->buf = skb->data;
  211. req->length = size;
  212. req->context = skb;
  213. retval = usb_ep_queue(out, req, gfp_flags);
  214. if (retval == -ENOMEM)
  215. enomem:
  216. defer_kevent(dev, WORK_RX_MEMORY);
  217. if (retval) {
  218. DBG(dev, "rx submit --> %d\n", retval);
  219. if (skb)
  220. dev_kfree_skb_any(skb);
  221. }
  222. return retval;
  223. }
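/* out_ep completion handler: good frames (de-aggregated by the function
 * driver's unwrap hook, if one is set) go onto rx_frames and rx_work is
 * kicked; the request itself is recycled onto rx_reqs in every case
 */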
  224. static void rx_complete(struct usb_ep *ep, struct usb_request *req)
  225. {
  226. struct sk_buff *skb = req->context;
  227. struct eth_dev *dev = ep->driver_data;
  228. int status = req->status;
  229. bool queue = false;
  230. switch (status) {
  231. /* normal completion */
  232. case 0:
  233. skb_put(skb, req->actual);
  234. if (dev->unwrap) {
  235. unsigned long flags;
  236. spin_lock_irqsave(&dev->lock, flags);
  237. if (dev->port_usb) {
  238. status = dev->unwrap(dev->port_usb,
  239. skb,
  240. &dev->rx_frames);
  241. if (status == -EINVAL)
  242. dev->net->stats.rx_errors++;
  243. else if (status == -EOVERFLOW)
  244. dev->net->stats.rx_over_errors++;
  245. } else {
  246. dev_kfree_skb_any(skb);
  247. status = -ENOTCONN;
  248. }
  249. spin_unlock_irqrestore(&dev->lock, flags);
  250. } else {
  251. skb_queue_tail(&dev->rx_frames, skb);
  252. }
  253. if (!status)
  254. queue = true;
  255. break;
  256. /* software-driven interface shutdown */
  257. case -ECONNRESET: /* unlink */
  258. case -ESHUTDOWN: /* disconnect etc */
  259. VDBG(dev, "rx shutdown, code %d\n", status);
  260. goto quiesce;
  261. /* for hardware automagic (such as pxa) */
  262. case -ECONNABORTED: /* endpoint reset */
  263. DBG(dev, "rx %s reset\n", ep->name);
  264. defer_kevent(dev, WORK_RX_MEMORY);
  265. quiesce:
  266. dev_kfree_skb_any(skb);
  267. goto clean;
  268. /* data overrun */
  269. case -EOVERFLOW:
  270. dev->net->stats.rx_over_errors++;
  271. /* FALLTHROUGH */
  272. default:
  273. queue = true;
  274. dev_kfree_skb_any(skb);
  275. dev->net->stats.rx_errors++;
  276. DBG(dev, "rx status %d\n", status);
  277. break;
  278. }
  279. clean:
  280. spin_lock(&dev->req_lock);
  281. list_add(&req->list, &dev->rx_reqs);
  282. spin_unlock(&dev->req_lock);
  283. if (queue)
  284. queue_work(uether_wq, &dev->rx_work);
  285. }
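/* make sure @list holds exactly @n requests for @ep: allocate what is
 * missing (completion handler chosen by endpoint direction) and free any
 * extras left over from a previously deeper queue
 */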
  286. static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
  287. {
  288. unsigned i;
  289. struct usb_request *req;
  290. bool usb_in;
  291. if (!n)
  292. return -ENOMEM;
  293. /* queue/recycle up to N requests */
  294. i = n;
  295. list_for_each_entry(req, list, list) {
  296. if (i-- == 0)
  297. goto extra;
  298. }
  299. if (ep->desc->bEndpointAddress & USB_DIR_IN)
  300. usb_in = true;
  301. else
  302. usb_in = false;
  303. while (i--) {
  304. req = usb_ep_alloc_request(ep, GFP_ATOMIC);
  305. if (!req)
  306. return list_empty(list) ? -ENOMEM : 0;
  307. /* update completion handler */
  308. if (usb_in)
  309. req->complete = tx_complete;
  310. else
  311. req->complete = rx_complete;
  312. list_add(&req->list, list);
  313. }
  314. return 0;
  315. extra:
  316. /* free extras */
  317. for (;;) {
  318. struct list_head *next;
  319. next = req->list.next;
  320. list_del(&req->list);
  321. usb_ep_free_request(ep, req);
  322. if (next == list)
  323. break;
  324. req = container_of(next, struct usb_request, list);
  325. }
  326. return 0;
  327. }
  328. static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
  329. {
  330. int status;
  331. spin_lock(&dev->req_lock);
  332. status = prealloc(&dev->tx_reqs, link->in_ep, n);
  333. if (status < 0)
  334. goto fail;
  335. status = prealloc(&dev->rx_reqs, link->out_ep, n);
  336. if (status < 0)
  337. goto fail;
  338. goto done;
  339. fail:
  340. DBG(dev, "can't alloc requests\n");
  341. done:
  342. spin_unlock(&dev->req_lock);
  343. return status;
  344. }
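/* queue up to qlen() receive requests from the rx_reqs pool; req_lock is
 * dropped around rx_submit(), which may sleep when called with GFP_KERNEL
 */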
  345. static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
  346. {
  347. struct usb_request *req;
  348. unsigned long flags;
  349. int req_cnt = 0;
  350. /* fill unused rxq slots with some skb */
  351. spin_lock_irqsave(&dev->req_lock, flags);
  352. while (!list_empty(&dev->rx_reqs)) {
  353. /* break the nexus of continuous completion and re-submission */
  354. if (++req_cnt > qlen(dev->gadget))
  355. break;
  356. req = container_of(dev->rx_reqs.next,
  357. struct usb_request, list);
  358. list_del_init(&req->list);
  359. spin_unlock_irqrestore(&dev->req_lock, flags);
  360. if (rx_submit(dev, req, gfp_flags) < 0) {
  361. spin_lock_irqsave(&dev->req_lock, flags);
  362. list_add(&req->list, &dev->rx_reqs);
  363. spin_unlock_irqrestore(&dev->req_lock, flags);
  364. defer_kevent(dev, WORK_RX_MEMORY);
  365. return;
  366. }
  367. spin_lock_irqsave(&dev->req_lock, flags);
  368. }
  369. spin_unlock_irqrestore(&dev->req_lock, flags);
  370. }
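/* workqueue handler: validate queued RX frames, hand them to the network
 * stack, then refill the RX queue if the interface is still running
 */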
  371. static void process_rx_w(struct work_struct *work)
  372. {
  373. struct eth_dev *dev = container_of(work, struct eth_dev, rx_work);
  374. struct sk_buff *skb;
  375. int status = 0;
  376. if (!dev->port_usb)
  377. return;
  378. while ((skb = skb_dequeue(&dev->rx_frames))) {
  379. if (status < 0
  380. || ETH_HLEN > skb->len
  381. || skb->len > ETH_FRAME_LEN) {
  382. #ifdef CONFIG_USB_NCM_SUPPORT_MTU_CHANGE
  383. /*
  384. * Needs revisiting: net->mtu does not include the header size in case of a changed MTU
  385. */
  386. if (!strcmp(dev->port_usb->func.name, "ncm")) {
  387. if (status < 0
  388. || ETH_HLEN > skb->len
  389. || skb->len > (dev->net->mtu + ETH_HLEN)) {
  390. printk(KERN_ERR "usb: %s drop NCM rx, length %d\n", __func__, skb->len);
  391. } else {
  392. printk(KERN_ERR "usb: %s keep NCM rx, length %d\n", __func__, skb->len);
  393. goto process_frame;
  394. }
  395. }
  396. #endif
  397. dev->net->stats.rx_errors++;
  398. dev->net->stats.rx_length_errors++;
  399. #ifndef CONFIG_USB_NCM_SUPPORT_MTU_CHANGE
  400. DBG(dev, "rx length %d\n", skb->len);
  401. #else
  402. printk(KERN_ERR "usb: %s Drop rx length %d\n",__func__,skb->len);
  403. #endif
  404. dev_kfree_skb_any(skb);
  405. continue;
  406. }
  407. #ifdef CONFIG_USB_NCM_SUPPORT_MTU_CHANGE
  408. process_frame:
  409. #endif
  410. skb->protocol = eth_type_trans(skb, dev->net);
  411. dev->net->stats.rx_packets++;
  412. dev->net->stats.rx_bytes += skb->len;
  413. status = netif_rx_ni(skb);
  414. }
  415. if (netif_running(dev->net))
  416. rx_fill(dev, GFP_KERNEL);
  417. }
  418. static void eth_work(struct work_struct *work)
  419. {
  420. struct eth_dev *dev = container_of(work, struct eth_dev, work);
  421. if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
  422. if (netif_running(dev->net))
  423. rx_fill(dev, GFP_KERNEL);
  424. }
  425. if (dev->todo)
  426. DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
  427. }
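/* in_ep completion handler: recycle the request onto tx_reqs and, on the
 * aggregation path, immediately queue the next request if it already
 * holds buffered frames
 */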
  428. static void tx_complete(struct usb_ep *ep, struct usb_request *req)
  429. {
  430. struct sk_buff *skb;
  431. struct eth_dev *dev;
  432. struct net_device *net;
  433. struct usb_request *new_req;
  434. struct usb_ep *in;
  435. int length;
  436. int retval;
  437. if (!ep->driver_data) {
  438. usb_ep_free_request(ep, req);
  439. return;
  440. }
  441. dev = ep->driver_data;
  442. net = dev->net;
  443. if (!dev->port_usb) {
  444. usb_ep_free_request(ep, req);
  445. return;
  446. }
  447. switch (req->status) {
  448. default:
  449. dev->net->stats.tx_errors++;
  450. VDBG(dev, "tx err %d\n", req->status);
  451. /* FALLTHROUGH */
  452. case -ECONNRESET: /* unlink */
  453. case -ESHUTDOWN: /* disconnect etc */
  454. break;
  455. case 0:
  456. if (!req->zero)
  457. dev->net->stats.tx_bytes += req->length-1;
  458. else
  459. dev->net->stats.tx_bytes += req->length;
  460. }
  461. dev->net->stats.tx_packets++;
  462. spin_lock(&dev->req_lock);
  463. list_add_tail(&req->list, &dev->tx_reqs);
  464. if (dev->port_usb->multi_pkt_xfer && !req->context) {
  465. dev->no_tx_req_used--;
  466. req->length = 0;
  467. in = dev->port_usb->in_ep;
  468. if (!list_empty(&dev->tx_reqs)) {
  469. new_req = container_of(dev->tx_reqs.next,
  470. struct usb_request, list);
  471. list_del(&new_req->list);
  472. spin_unlock(&dev->req_lock);
  473. if (new_req->length > 0) {
  474. length = new_req->length;
  475. /* NCM requires no zlp if transfer is
  476. * dwNtbInMaxSize */
  477. if (dev->port_usb->is_fixed &&
  478. length == dev->port_usb->fixed_in_len &&
  479. (length % in->maxpacket) == 0)
  480. new_req->zero = 0;
  481. else
  482. new_req->zero = 1;
  483. /* use zlp framing on tx for strict CDC-Ether
  484. * conformance, though any robust network rx
  485. * path ignores extra padding, and some hardware
  486. * doesn't like to write zlps.
  487. */
  488. if (new_req->zero && !dev->zlp &&
  489. (length % in->maxpacket) == 0) {
  490. new_req->zero = 0;
  491. length++;
  492. }
  493. new_req->length = length;
  494. retval = usb_ep_queue(in, new_req, GFP_ATOMIC);
  495. switch (retval) {
  496. default:
  497. #ifndef CONFIG_USB_NCM_SUPPORT_MTU_CHANGE
  498. DBG(dev, "tx queue err %d\n", retval);
  499. #else
  500. printk(KERN_ERR"usb:%s tx queue err %d\n",__func__, retval);
  501. #endif
  502. new_req->length = 0;
  503. spin_lock(&dev->req_lock);
  504. list_add_tail(&new_req->list,
  505. &dev->tx_reqs);
  506. spin_unlock(&dev->req_lock);
  507. break;
  508. case 0:
  509. spin_lock(&dev->req_lock);
  510. dev->no_tx_req_used++;
  511. spin_unlock(&dev->req_lock);
  512. net->trans_start = jiffies;
  513. }
  514. } else {
  515. spin_lock(&dev->req_lock);
  516. /*
  517. * Put the idle request at the back of the
  518. * queue. The xmit function will put the
  519. * unfinished request at the beginning of the
  520. * queue.
  521. */
  522. list_add_tail(&new_req->list, &dev->tx_reqs);
  523. spin_unlock(&dev->req_lock);
  524. }
  525. } else {
  526. spin_unlock(&dev->req_lock);
  527. }
  528. } else {
  529. skb = req->context;
  530. /* Is aggregation already enabled and buffers allocated ? */
  531. if (dev->port_usb->multi_pkt_xfer && dev->tx_req_bufsize) {
  532. req->buf = kzalloc(dev->tx_req_bufsize, GFP_ATOMIC);
  533. req->context = NULL;
  534. } else {
  535. req->buf = NULL;
  536. }
  537. spin_unlock(&dev->req_lock);
  538. dev_kfree_skb_any(skb);
  539. }
  540. if (netif_carrier_ok(dev->net))
  541. netif_wake_queue(dev->net);
  542. }
  543. static inline int is_promisc(u16 cdc_filter)
  544. {
  545. return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
  546. }
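/* preallocate one aggregation buffer per TX request, sized for
 * dl_max_pkts_per_xfer frames of MTU + ethernet header + encapsulation
 * header; the 44 below matches the RNDIS packet message header, and the
 * extra 22 bytes appear to be alignment/headroom slack
 */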
  547. static int alloc_tx_buffer(struct eth_dev *dev)
  548. {
  549. struct list_head *act;
  550. struct usb_request *req;
  551. dev->tx_req_bufsize = (dev->dl_max_pkts_per_xfer *
  552. (dev->net->mtu
  553. + sizeof(struct ethhdr)
  554. /* size of rndis_packet_msg_type */
  555. + 44
  556. + 22));
  557. list_for_each(act, &dev->tx_reqs) {
  558. req = container_of(act, struct usb_request, list);
  559. if (!req->buf) {
  560. req->buf = kzalloc(dev->tx_req_bufsize,
  561. GFP_ATOMIC);
  562. if (!req->buf)
  563. goto free_buf;
  564. }
  565. /* req->context is not used for multi_pkt_xfers */
  566. req->context = NULL;
  567. }
  568. return 0;
  569. free_buf:
  570. /* tx_req_bufsize = 0 retries mem alloc on next eth_start_xmit */
  571. dev->tx_req_bufsize = 0;
  572. list_for_each(act, &dev->tx_reqs) {
  573. req = container_of(act, struct usb_request, list);
  574. kfree(req->buf);
  575. req->buf = NULL;
  576. }
  577. return -ENOMEM;
  578. }
  579. static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
  580. struct net_device *net)
  581. {
  582. struct eth_dev *dev = netdev_priv(net);
  583. int length = skb->len;
  584. int retval;
  585. struct usb_request *req = NULL;
  586. unsigned long flags;
  587. struct usb_ep *in;
  588. u16 cdc_filter;
  589. bool multi_pkt_xfer = false;
  590. spin_lock_irqsave(&dev->lock, flags);
  591. if (dev->port_usb) {
  592. in = dev->port_usb->in_ep;
  593. cdc_filter = dev->port_usb->cdc_filter;
  594. multi_pkt_xfer = dev->port_usb->multi_pkt_xfer;
  595. } else {
  596. in = NULL;
  597. cdc_filter = 0;
  598. }
  599. spin_unlock_irqrestore(&dev->lock, flags);
  600. if (!in) {
  601. dev_kfree_skb_any(skb);
  602. return NETDEV_TX_OK;
  603. }
  604. /* Allocate memory for tx_reqs to support multi packet transfer */
  605. spin_lock_irqsave(&dev->req_lock, flags);
  606. if (multi_pkt_xfer && !dev->tx_req_bufsize) {
  607. retval = alloc_tx_buffer(dev);
  608. if (retval < 0) {
  609. spin_unlock_irqrestore(&dev->req_lock, flags);
  610. return NETDEV_TX_BUSY;
  611. }
  612. }
  613. spin_unlock_irqrestore(&dev->req_lock, flags);
  614. /* apply outgoing CDC or RNDIS filters */
  615. if (!is_promisc(cdc_filter)) {
  616. u8 *dest = skb->data;
  617. if (is_multicast_ether_addr(dest)) {
  618. u16 type;
  619. /* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
  620. * SET_ETHERNET_MULTICAST_FILTERS requests
  621. */
  622. if (is_broadcast_ether_addr(dest))
  623. type = USB_CDC_PACKET_TYPE_BROADCAST;
  624. else
  625. type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
  626. if (!(cdc_filter & type)) {
  627. dev_kfree_skb_any(skb);
  628. return NETDEV_TX_OK;
  629. }
  630. }
  631. /* ignores USB_CDC_PACKET_TYPE_DIRECTED */
  632. }
  633. spin_lock_irqsave(&dev->req_lock, flags);
  634. /*
  635. * this freelist can be empty if an interrupt triggered disconnect()
  636. * and reconfigured the gadget (shutting down this queue) after the
  637. * network stack decided to xmit but before we got the spinlock.
  638. */
  639. if (list_empty(&dev->tx_reqs)) {
  640. spin_unlock_irqrestore(&dev->req_lock, flags);
  641. return NETDEV_TX_BUSY;
  642. }
  643. req = container_of(dev->tx_reqs.next, struct usb_request, list);
  644. list_del(&req->list);
  645. /* temporarily stop TX queue when the freelist empties */
  646. if (list_empty(&dev->tx_reqs))
  647. netif_stop_queue(net);
  648. spin_unlock_irqrestore(&dev->req_lock, flags);
  649. /* no buffer copies needed, unless the network stack did it
  650. * or the hardware can't use skb buffers,
  651. * or there's not enough space for extra headers we need
  652. */
  653. spin_lock_irqsave(&dev->lock, flags);
  654. if (dev->wrap) {
  655. if (dev->port_usb)
  656. skb = dev->wrap(dev->port_usb, skb);
  657. if (!skb) {
  658. spin_unlock_irqrestore(&dev->lock, flags);
  659. goto drop;
  660. }
  661. }
  662. if (multi_pkt_xfer) {
  663. pr_debug("req->length:%d header_len:%u\n"
  664. "skb->len:%d skb->data_len:%d\n",
  665. req->length, dev->header_len,
  666. skb->len, skb->data_len);
  667. /* Add RNDIS Header */
  668. memcpy(req->buf + req->length, dev->port_usb->header,
  669. dev->header_len);
  670. /* Increment req length by header size */
  671. req->length += dev->header_len;
  672. spin_unlock_irqrestore(&dev->lock, flags);
  673. /* Copy the outgoing frame data from the skb */
  674. memcpy(req->buf + req->length, skb->data, skb->len);
  675. /* Increment req length by skb data length */
  676. req->length += skb->len;
  677. length = req->length;
  678. dev_kfree_skb_any(skb);
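/* If this batch is not yet full and enough requests are already in
 * flight, park the partially-filled request at the head of the freelist
 * so later packets are appended to it; otherwise submit it now.
 */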
  679. spin_lock_irqsave(&dev->req_lock, flags);
  680. dev->tx_skb_hold_count++;
  681. if (dev->tx_skb_hold_count < dev->dl_max_pkts_per_xfer) {
  682. if (dev->no_tx_req_used > TX_REQ_THRESHOLD) {
  683. list_add(&req->list, &dev->tx_reqs);
  684. spin_unlock_irqrestore(&dev->req_lock, flags);
  685. goto success;
  686. }
  687. }
  688. dev->no_tx_req_used++;
  689. dev->tx_skb_hold_count = 0;
  690. spin_unlock_irqrestore(&dev->req_lock, flags);
  691. } else {
  692. spin_unlock_irqrestore(&dev->lock, flags);
  693. length = skb->len;
  694. req->buf = skb->data;
  695. req->context = skb;
  696. }
  697. /* NCM requires no zlp if transfer is dwNtbInMaxSize */
  698. if (dev->port_usb->is_fixed &&
  699. length == dev->port_usb->fixed_in_len &&
  700. (length % in->maxpacket) == 0)
  701. req->zero = 0;
  702. else
  703. req->zero = 1;
  704. /* use zlp framing on tx for strict CDC-Ether conformance,
  705. * though any robust network rx path ignores extra padding,
  706. * and some hardware doesn't like to write zlps.
  707. */
  708. if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) {
  709. req->zero = 0;
  710. length++;
  711. }
  712. req->length = length;
  713. /* throttle high/super speed IRQ rate back slightly */
  714. if (gadget_is_dualspeed(dev->gadget) &&
  715. (dev->gadget->speed == USB_SPEED_HIGH ||
  716. dev->gadget->speed == USB_SPEED_SUPER)) {
  717. dev->tx_qlen++;
  718. if (dev->tx_qlen == (qmult/2)) {
  719. req->no_interrupt = 0;
  720. dev->tx_qlen = 0;
  721. } else {
  722. req->no_interrupt = 1;
  723. }
  724. } else {
  725. req->no_interrupt = 0;
  726. }
  727. retval = usb_ep_queue(in, req, GFP_ATOMIC);
  728. switch (retval) {
  729. default:
  730. DBG(dev, "tx queue err %d\n", retval);
  731. break;
  732. case 0:
  733. net->trans_start = jiffies;
  734. }
  735. if (retval) {
  736. if (!multi_pkt_xfer)
  737. dev_kfree_skb_any(skb);
  738. else
  739. req->length = 0;
  740. drop:
  741. dev->net->stats.tx_dropped++;
  742. spin_lock_irqsave(&dev->req_lock, flags);
  743. if (list_empty(&dev->tx_reqs))
  744. netif_start_queue(net);
  745. list_add_tail(&req->list, &dev->tx_reqs);
  746. spin_unlock_irqrestore(&dev->req_lock, flags);
  747. }
  748. success:
  749. return NETDEV_TX_OK;
  750. }
  751. /*-------------------------------------------------------------------------*/
  752. static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
  753. {
  754. DBG(dev, "%s\n", __func__);
  755. /* fill the rx queue */
  756. rx_fill(dev, gfp_flags);
  757. /* and open the tx floodgates */
  758. dev->tx_qlen = 0;
  759. netif_wake_queue(dev->net);
  760. }
  761. static int eth_open(struct net_device *net)
  762. {
  763. struct eth_dev *dev = netdev_priv(net);
  764. struct gether *link;
  765. DBG(dev, "%s\n", __func__);
  766. if (netif_carrier_ok(dev->net))
  767. eth_start(dev, GFP_KERNEL);
  768. spin_lock_irq(&dev->lock);
  769. link = dev->port_usb;
  770. if (link && link->open)
  771. link->open(link);
  772. spin_unlock_irq(&dev->lock);
  773. return 0;
  774. }
  775. static int eth_stop(struct net_device *net)
  776. {
  777. struct eth_dev *dev = netdev_priv(net);
  778. unsigned long flags;
  779. VDBG(dev, "%s\n", __func__);
  780. netif_stop_queue(net);
  781. DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
  782. dev->net->stats.rx_packets, dev->net->stats.tx_packets,
  783. dev->net->stats.rx_errors, dev->net->stats.tx_errors
  784. );
  785. /* ensure there are no more active requests */
  786. spin_lock_irqsave(&dev->lock, flags);
  787. if (dev->port_usb) {
  788. struct gether *link = dev->port_usb;
  789. const struct usb_endpoint_descriptor *in;
  790. const struct usb_endpoint_descriptor *out;
  791. if (link->close)
  792. link->close(link);
  793. /* NOTE: we have no abort-queue primitive we could use
  794. * to cancel all pending I/O. Instead, we disable then
  795. * reenable the endpoints ... this idiom may leave toggle
  796. * wrong, but that's a self-correcting error.
  797. *
  798. * REVISIT: we *COULD* just let the transfers complete at
  799. * their own pace; the network stack can handle old packets.
  800. * For the moment we leave this here, since it works.
  801. */
  802. in = link->in_ep->desc;
  803. out = link->out_ep->desc;
  804. usb_ep_disable(link->in_ep);
  805. usb_ep_disable(link->out_ep);
  806. if (netif_carrier_ok(net)) {
  807. if (config_ep_by_speed(dev->gadget, &link->func,
  808. link->in_ep) ||
  809. config_ep_by_speed(dev->gadget, &link->func,
  810. link->out_ep)) {
  811. link->in_ep->desc = NULL;
  812. link->out_ep->desc = NULL;
  813. spin_unlock_irqrestore(&dev->lock, flags);
  return -EINVAL;
  814. }
  815. DBG(dev, "host still using in/out endpoints\n");
  816. link->in_ep->desc = in;
  817. link->out_ep->desc = out;
  818. usb_ep_enable(link->in_ep);
  819. usb_ep_enable(link->out_ep);
  820. }
  821. }
  822. spin_unlock_irqrestore(&dev->lock, flags);
  823. return 0;
  824. }
  825. /*-------------------------------------------------------------------------*/
  826. /* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
  827. static char *dev_addr;
  828. module_param(dev_addr, charp, S_IRUGO);
  829. MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");
  830. /* this address is invisible to ifconfig */
  831. static char *host_addr;
  832. module_param(host_addr, charp, S_IRUGO);
  833. MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
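/* parse a MAC address string such as "aa:bb:cc:dd:ee:ff" (':' or '.'
 * separators); falls back to a random address and returns 1 when the
 * string is missing or invalid
 */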
  834. static int get_ether_addr(const char *str, u8 *dev_addr)
  835. {
  836. if (str) {
  837. unsigned i;
  838. for (i = 0; i < 6; i++) {
  839. unsigned char num;
  840. if ((*str == '.') || (*str == ':'))
  841. str++;
  842. num = hex_to_bin(*str++) << 4;
  843. num |= hex_to_bin(*str++);
  844. dev_addr [i] = num;
  845. }
  846. if (is_valid_ether_addr(dev_addr))
  847. return 0;
  848. }
  849. random_ether_addr(dev_addr);
  850. return 1;
  851. }
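/* the single link instance supported by this layer; gether_setup_name()
 * returns -EBUSY if it already exists
 */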
  852. static struct eth_dev *the_dev;
  853. static const struct net_device_ops eth_netdev_ops = {
  854. .ndo_open = eth_open,
  855. .ndo_stop = eth_stop,
  856. .ndo_start_xmit = eth_start_xmit,
  857. .ndo_change_mtu = ueth_change_mtu,
  858. .ndo_set_mac_address = eth_mac_addr,
  859. .ndo_validate_addr = eth_validate_addr,
  860. };
  861. static struct device_type gadget_type = {
  862. .name = "gadget",
  863. };
  864. /**
  865. * gether_setup - initialize one ethernet-over-usb link
  866. * @g: gadget to associate with this link
  867. * @ethaddr: NULL, or a buffer in which the ethernet address of the
  868. * host side of the link is recorded
  869. * Context: may sleep
  870. *
  871. * This sets up the single network link that may be exported by a
  872. * gadget driver using this framework. The link layer addresses are
  873. * set up using module parameters.
  874. *
  875. * Returns negative errno, or zero on success
  876. */
  877. int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
  878. {
  879. return gether_setup_name(g, ethaddr, "usb");
  880. }
  881. /**
  882. * gether_setup_name - initialize one ethernet-over-usb link
  883. * @g: gadget to associate with this link
  884. * @ethaddr: NULL, or a buffer in which the ethernet address of the
  885. * host side of the link is recorded
  886. * @netname: name for network device (for example, "usb")
  887. * Context: may sleep
  888. *
  889. * This sets up the single network link that may be exported by a
  890. * gadget driver using this framework. The link layer addresses are
  891. * set up using module parameters.
  892. *
  893. * Returns negative errno, or zero on success
  894. */
  895. int gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
  896. const char *netname)
  897. {
  898. struct eth_dev *dev;
  899. struct net_device *net;
  900. int status;
  901. if (the_dev)
  902. return -EBUSY;
  903. net = alloc_etherdev(sizeof *dev);
  904. if (!net)
  905. return -ENOMEM;
  906. dev = netdev_priv(net);
  907. spin_lock_init(&dev->lock);
  908. spin_lock_init(&dev->req_lock);
  909. INIT_WORK(&dev->work, eth_work);
  910. INIT_WORK(&dev->rx_work, process_rx_w);
  911. INIT_LIST_HEAD(&dev->tx_reqs);
  912. INIT_LIST_HEAD(&dev->rx_reqs);
  913. skb_queue_head_init(&dev->rx_frames);
  914. /* network device setup */
  915. dev->net = net;
  916. snprintf(net->name, sizeof(net->name), "%s%%d", netname);
  917. if (get_ether_addr(dev_addr, net->dev_addr))
  918. dev_warn(&g->dev,
  919. "using random %s ethernet address\n", "self");
  920. #ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE
  921. memcpy(dev->host_mac, ethaddr, ETH_ALEN);
  922. printk(KERN_DEBUG "usb: set unique host mac\n");
  923. #else
  924. if (get_ether_addr(host_addr, dev->host_mac))
  925. dev_warn(&g->dev,
  926. "using random %s ethernet address\n", "host");
  927. if (ethaddr)
  928. memcpy(ethaddr, dev->host_mac, ETH_ALEN);
  929. #endif
  930. net->netdev_ops = &eth_netdev_ops;
  931. SET_ETHTOOL_OPS(net, &ops);
  932. dev->gadget = g;
  933. SET_NETDEV_DEV(net, &g->dev);
  934. SET_NETDEV_DEVTYPE(net, &gadget_type);
  935. status = register_netdev(net);
  936. if (status < 0) {
  937. dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
  938. free_netdev(net);
  939. } else {
  940. INFO(dev, "MAC %pM\n", net->dev_addr);
  941. INFO(dev, "HOST MAC %pM\n", dev->host_mac);
  942. the_dev = dev;
  943. /* two kinds of host-initiated state changes:
  944. * - iff DATA transfer is active, carrier is "on"
  945. * - tx queueing enabled if open *and* carrier is "on"
  946. */
  947. netif_carrier_off(net);
  948. }
  949. return status;
  950. }
  951. /**
  952. * gether_cleanup - remove Ethernet-over-USB device
  953. * Context: may sleep
  954. *
  955. * This is called to free all resources allocated by gether_setup().
  956. */
  957. void gether_cleanup(void)
  958. {
  959. if (!the_dev)
  960. return;
  961. unregister_netdev(the_dev->net);
  962. flush_work_sync(&the_dev->work);
  963. free_netdev(the_dev->net);
  964. the_dev = NULL;
  965. }
  966. /**
  967. * gether_connect - notify network layer that USB link is active
  968. * @link: the USB link, set up with endpoints, descriptors matching
  969. * current device speed, and any framing wrapper(s) set up.
  970. * Context: irqs blocked
  971. *
  972. * This is called to activate endpoints and let the network layer know
  973. * the connection is active ("carrier detect"). It may cause the I/O
  974. * queues to open and start letting network packets flow, but will in
  975. * any case activate the endpoints so that they respond properly to the
  976. * USB host.
  977. *
  978. * Verify net_device pointer returned using IS_ERR(). If it doesn't
  979. * indicate some error code (negative errno), ep->driver_data values
  980. * have been overwritten.
  981. */
  982. struct net_device *gether_connect(struct gether *link)
  983. {
  984. struct eth_dev *dev = the_dev;
  985. int result = 0;
  986. if (!dev)
  987. return ERR_PTR(-EINVAL);
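/* preallocate the encapsulation (RNDIS packet message) header that
 * eth_start_xmit() copies in front of each aggregated TX frame
 */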
  988. link->header = kzalloc(sizeof(struct rndis_packet_msg_type),
  989. GFP_ATOMIC);
  990. if (!link->header) {
  991. pr_err("RNDIS header memory allocation failed.\n");
  992. result = -ENOMEM;
  993. goto fail;
  994. }
  995. link->in_ep->driver_data = dev;
  996. result = usb_ep_enable(link->in_ep);
  997. if (result != 0) {
  998. DBG(dev, "enable %s --> %d\n",
  999. link->in_ep->name, result);
  1000. goto fail0;
  1001. }
  1002. link->out_ep->driver_data = dev;
  1003. result = usb_ep_enable(link->out_ep);
  1004. if (result != 0) {
  1005. DBG(dev, "enable %s --> %d\n",
  1006. link->out_ep->name, result);
  1007. goto fail1;
  1008. }
  1009. if (result == 0)
  1010. result = alloc_requests(dev, link, qlen(dev->gadget));
  1011. if (result == 0) {
  1012. dev->zlp = link->is_zlp_ok;
  1013. DBG(dev, "qlen %d\n", qlen(dev->gadget));
  1014. dev->header_len = link->header_len;
  1015. dev->unwrap = link->unwrap;
  1016. dev->wrap = link->wrap;
  1017. dev->ul_max_pkts_per_xfer = link->ul_max_pkts_per_xfer;
  1018. dev->dl_max_pkts_per_xfer = link->dl_max_pkts_per_xfer;
  1019. spin_lock(&dev->lock);
  1020. dev->tx_skb_hold_count = 0;
  1021. dev->no_tx_req_used = 0;
  1022. dev->tx_req_bufsize = 0;
  1023. dev->port_usb = link;
  1024. link->ioport = dev;
  1025. if (netif_running(dev->net)) {
  1026. if (link->open)
  1027. link->open(link);
  1028. } else {
  1029. if (link->close)
  1030. link->close(link);
  1031. }
  1032. spin_unlock(&dev->lock);
  1033. netif_carrier_on(dev->net);
  1034. if (netif_running(dev->net))
  1035. eth_start(dev, GFP_ATOMIC);
  1036. /* on error, disable any endpoints */
  1037. } else {
  1038. (void) usb_ep_disable(link->out_ep);
  1039. fail1:
  1040. (void) usb_ep_disable(link->in_ep);
  1041. }
  1042. /* caller is responsible for cleanup on error */
  1043. if (result < 0) {
  1044. fail0:
  1045. kfree(link->header);
  1046. fail:
  1047. return ERR_PTR(result);
  1048. }
  1049. return dev->net;
  1050. }
  1051. /**
  1052. * gether_disconnect - notify network layer that USB link is inactive
  1053. * @link: the USB link, on which gether_connect() was called
  1054. * Context: irqs blocked
  1055. *
  1056. * This is called to deactivate endpoints and let the network layer know
  1057. * the connection went inactive ("no carrier").
  1058. *
  1059. * On return, the state is as if gether_connect() had never been called.
  1060. * The endpoints are inactive, and accordingly without active USB I/O.
  1061. * Pointers to endpoint descriptors and endpoint private data are nulled.
  1062. */
  1063. void gether_disconnect(struct gether *link)
  1064. {
  1065. struct eth_dev *dev = link->ioport;
  1066. struct usb_request *req;
  1067. struct sk_buff *skb;
  1068. if (!dev)
  1069. return;
  1070. DBG(dev, "%s\n", __func__);
  1071. netif_stop_queue(dev->net);
  1072. netif_carrier_off(dev->net);
  1073. /* disable endpoints, forcing (synchronous) completion
  1074. * of all pending i/o. then free the request objects
  1075. * and forget about the endpoints.
  1076. */
  1077. usb_ep_disable(link->in_ep);
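/* free all TX requests; req_lock is dropped around usb_ep_free_request()
 * and any aggregation buffers from alloc_tx_buffer() are released too
 */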
  1078. spin_lock(&dev->req_lock);
  1079. while (!list_empty(&dev->tx_reqs)) {
  1080. req = container_of(dev->tx_reqs.next,
  1081. struct usb_request, list);
  1082. list_del(&req->list);
  1083. spin_unlock(&dev->req_lock);
  1084. if (link->multi_pkt_xfer) {
  1085. kfree(req->buf);
  1086. req->buf = NULL;
  1087. }
  1088. usb_ep_free_request(link->in_ep, req);
  1089. spin_lock(&dev->req_lock);
  1090. }
  1091. /* Free rndis header buffer memory */
  1092. kfree(link->header);
  1093. link->header = NULL;
  1094. spin_unlock(&dev->req_lock);
  1095. link->in_ep->driver_data = NULL;
  1096. link->in_ep->desc = NULL;
  1097. usb_ep_disable(link->out_ep);
  1098. spin_lock(&dev->req_lock);
  1099. while (!list_empty(&dev->rx_reqs)) {
  1100. req = container_of(dev->rx_reqs.next,
  1101. struct usb_request, list);
  1102. list_del(&req->list);
  1103. spin_unlock(&dev->req_lock);
  1104. usb_ep_free_request(link->out_ep, req);
  1105. spin_lock(&dev->req_lock);
  1106. }
  1107. spin_unlock(&dev->req_lock);
  1108. spin_lock(&dev->rx_frames.lock);
  1109. while ((skb = __skb_dequeue(&dev->rx_frames)))
  1110. dev_kfree_skb_any(skb);
  1111. spin_unlock(&dev->rx_frames.lock);
  1112. link->out_ep->driver_data = NULL;
  1113. link->out_ep->desc = NULL;
  1114. /* finish forgetting about this USB link episode */
  1115. dev->header_len = 0;
  1116. dev->unwrap = NULL;
  1117. dev->wrap = NULL;
  1118. spin_lock(&dev->lock);
  1119. dev->port_usb = NULL;
  1120. link->ioport = NULL;
  1121. spin_unlock(&dev->lock);
  1122. }
  1123. static int __init gether_init(void)
  1124. {
  1125. uether_wq = create_singlethread_workqueue("uether");
  1126. if (!uether_wq) {
  1127. pr_err("%s: Unable to create workqueue: uether\n", __func__);
  1128. return -ENOMEM;
  1129. }
  1130. return 0;
  1131. }
  1132. module_init(gether_init);
  1133. static void __exit gether_exit(void)
  1134. {
  1135. destroy_workqueue(uether_wq);
  1136. }
  1137. module_exit(gether_exit);
  1138. MODULE_DESCRIPTION("ethernet over USB driver");
  1139. MODULE_LICENSE("GPL v2");