vport.c 10 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406
  1. /*
  2. * Copyright (c) 2007-2011 Nicira Networks.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of version 2 of the GNU General Public
  6. * License as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful, but
  9. * WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11. * General Public License for more details.
  12. *
  13. * You should have received a copy of the GNU General Public License
  14. * along with this program; if not, write to the Free Software
  15. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  16. * 02110-1301, USA
  17. */
  18. #include <linux/dcache.h>
  19. #include <linux/etherdevice.h>
  20. #include <linux/if.h>
  21. #include <linux/if_vlan.h>
  22. #include <linux/kernel.h>
  23. #include <linux/list.h>
  24. #include <linux/mutex.h>
  25. #include <linux/percpu.h>
  26. #include <linux/rcupdate.h>
  27. #include <linux/rtnetlink.h>
  28. #include <linux/compat.h>
  29. #include "vport.h"
  30. #include "vport-internal_dev.h"
/* List of statically compiled vport implementations.  Don't forget to also
 * add yours to the list at the bottom of vport.h. */
static const struct vport_ops *vport_ops_list[] = {
	&ovs_netdev_vport_ops,
	&ovs_internal_vport_ops,
};

/* Hash table mapping port names to vports, allocated by ovs_vport_init().
 * Protected by RCU read lock for reading, RTNL lock for writing. */
static struct hlist_head *dev_table;

/* Bucket count for dev_table; must remain a power of two because
 * hash_bucket() masks the hash with (VPORT_HASH_BUCKETS - 1). */
#define VPORT_HASH_BUCKETS 1024
  40. /**
  41. * ovs_vport_init - initialize vport subsystem
  42. *
  43. * Called at module load time to initialize the vport subsystem.
  44. */
  45. int ovs_vport_init(void)
  46. {
  47. dev_table = kzalloc(VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
  48. GFP_KERNEL);
  49. if (!dev_table)
  50. return -ENOMEM;
  51. return 0;
  52. }
  53. /**
  54. * ovs_vport_exit - shutdown vport subsystem
  55. *
  56. * Called at module exit time to shutdown the vport subsystem.
  57. */
  58. void ovs_vport_exit(void)
  59. {
  60. kfree(dev_table);
  61. }
  62. static struct hlist_head *hash_bucket(const char *name)
  63. {
  64. unsigned int hash = full_name_hash(name, strlen(name));
  65. return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
  66. }
  67. /**
  68. * ovs_vport_locate - find a port that has already been created
  69. *
  70. * @name: name of port to find
  71. *
  72. * Must be called with RTNL or RCU read lock.
  73. */
  74. struct vport *ovs_vport_locate(const char *name)
  75. {
  76. struct hlist_head *bucket = hash_bucket(name);
  77. struct vport *vport;
  78. struct hlist_node *node;
  79. hlist_for_each_entry_rcu(vport, node, bucket, hash_node)
  80. if (!strcmp(name, vport->ops->get_name(vport)))
  81. return vport;
  82. return NULL;
  83. }
  84. /**
  85. * ovs_vport_alloc - allocate and initialize new vport
  86. *
  87. * @priv_size: Size of private data area to allocate.
  88. * @ops: vport device ops
  89. *
  90. * Allocate and initialize a new vport defined by @ops. The vport will contain
  91. * a private data area of size @priv_size that can be accessed using
  92. * vport_priv(). vports that are no longer needed should be released with
  93. * vport_free().
  94. */
  95. struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
  96. const struct vport_parms *parms)
  97. {
  98. struct vport *vport;
  99. size_t alloc_size;
  100. int i;
  101. alloc_size = sizeof(struct vport);
  102. if (priv_size) {
  103. alloc_size = ALIGN(alloc_size, VPORT_ALIGN);
  104. alloc_size += priv_size;
  105. }
  106. vport = kzalloc(alloc_size, GFP_KERNEL);
  107. if (!vport)
  108. return ERR_PTR(-ENOMEM);
  109. vport->dp = parms->dp;
  110. vport->port_no = parms->port_no;
  111. vport->upcall_pid = parms->upcall_pid;
  112. vport->ops = ops;
  113. vport->percpu_stats = alloc_percpu(struct vport_percpu_stats);
  114. if (!vport->percpu_stats) {
  115. kfree(vport);
  116. return ERR_PTR(-ENOMEM);
  117. }
  118. for_each_possible_cpu(i) {
  119. struct pcpu_tstats *vport_stats;
  120. vport_stats = per_cpu_ptr(vport->percpu_stats, i);
  121. u64_stats_init(&vport_stats->syncp);
  122. }
  123. spin_lock_init(&vport->stats_lock);
  124. return vport;
  125. }
/**
 *	ovs_vport_free - uninitialize and free vport
 *
 * @vport: vport to free
 *
 * Frees a vport allocated with vport_alloc() when it is no longer needed.
 *
 * The caller must ensure that an RCU grace period has passed since the last
 * time @vport was in a datapath.
 */
void ovs_vport_free(struct vport *vport)
{
	/* Release the per-CPU stats area first: the percpu pointer is
	 * stored inside *vport, so the order must not be swapped. */
	free_percpu(vport->percpu_stats);
	kfree(vport);
}
  141. /**
  142. * ovs_vport_add - add vport device (for kernel callers)
  143. *
  144. * @parms: Information about new vport.
  145. *
  146. * Creates a new vport with the specified configuration (which is dependent on
  147. * device type). RTNL lock must be held.
  148. */
  149. struct vport *ovs_vport_add(const struct vport_parms *parms)
  150. {
  151. struct vport *vport;
  152. int err = 0;
  153. int i;
  154. ASSERT_RTNL();
  155. for (i = 0; i < ARRAY_SIZE(vport_ops_list); i++) {
  156. if (vport_ops_list[i]->type == parms->type) {
  157. vport = vport_ops_list[i]->create(parms);
  158. if (IS_ERR(vport)) {
  159. err = PTR_ERR(vport);
  160. goto out;
  161. }
  162. hlist_add_head_rcu(&vport->hash_node,
  163. hash_bucket(vport->ops->get_name(vport)));
  164. return vport;
  165. }
  166. }
  167. err = -EAFNOSUPPORT;
  168. out:
  169. return ERR_PTR(err);
  170. }
  171. /**
  172. * ovs_vport_set_options - modify existing vport device (for kernel callers)
  173. *
  174. * @vport: vport to modify.
  175. * @port: New configuration.
  176. *
  177. * Modifies an existing device with the specified configuration (which is
  178. * dependent on device type). RTNL lock must be held.
  179. */
  180. int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
  181. {
  182. ASSERT_RTNL();
  183. if (!vport->ops->set_options)
  184. return -EOPNOTSUPP;
  185. return vport->ops->set_options(vport, options);
  186. }
/**
 *	ovs_vport_del - delete existing vport device
 *
 * @vport: vport to delete.
 *
 * Detaches @vport from its datapath and destroys it. It is possible to fail
 * for reasons such as lack of memory. RTNL lock must be held.
 */
void ovs_vport_del(struct vport *vport)
{
	ASSERT_RTNL();

	/* Unlink from the name table before destroying so concurrent RCU
	 * readers of dev_table stop finding this port; destroy() is
	 * responsible for deferring the actual free past a grace period. */
	hlist_del_rcu(&vport->hash_node);

	vport->ops->destroy(vport);
}
  201. /**
  202. * ovs_vport_get_stats - retrieve device stats
  203. *
  204. * @vport: vport from which to retrieve the stats
  205. * @stats: location to store stats
  206. *
  207. * Retrieves transmit, receive, and error stats for the given device.
  208. *
  209. * Must be called with RTNL lock or rcu_read_lock.
  210. */
  211. void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
  212. {
  213. int i;
  214. memset(stats, 0, sizeof(*stats));
  215. /* We potentially have 2 sources of stats that need to be combined:
  216. * those we have collected (split into err_stats and percpu_stats) from
  217. * set_stats() and device error stats from netdev->get_stats() (for
  218. * errors that happen downstream and therefore aren't reported through
  219. * our vport_record_error() function).
  220. * Stats from first source are reported by ovs (OVS_VPORT_ATTR_STATS).
  221. * netdev-stats can be directly read over netlink-ioctl.
  222. */
  223. spin_lock_bh(&vport->stats_lock);
  224. stats->rx_errors = vport->err_stats.rx_errors;
  225. stats->tx_errors = vport->err_stats.tx_errors;
  226. stats->tx_dropped = vport->err_stats.tx_dropped;
  227. stats->rx_dropped = vport->err_stats.rx_dropped;
  228. spin_unlock_bh(&vport->stats_lock);
  229. for_each_possible_cpu(i) {
  230. const struct vport_percpu_stats *percpu_stats;
  231. struct vport_percpu_stats local_stats;
  232. unsigned int start;
  233. percpu_stats = per_cpu_ptr(vport->percpu_stats, i);
  234. do {
  235. start = u64_stats_fetch_begin_irq(&percpu_stats->sync);
  236. local_stats = *percpu_stats;
  237. } while (u64_stats_fetch_retry_irq(&percpu_stats->sync, start));
  238. stats->rx_bytes += local_stats.rx_bytes;
  239. stats->rx_packets += local_stats.rx_packets;
  240. stats->tx_bytes += local_stats.tx_bytes;
  241. stats->tx_packets += local_stats.tx_packets;
  242. }
  243. }
  244. /**
  245. * ovs_vport_get_options - retrieve device options
  246. *
  247. * @vport: vport from which to retrieve the options.
  248. * @skb: sk_buff where options should be appended.
  249. *
  250. * Retrieves the configuration of the given device, appending an
  251. * %OVS_VPORT_ATTR_OPTIONS attribute that in turn contains nested
  252. * vport-specific attributes to @skb.
  253. *
  254. * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room, or another
  255. * negative error code if a real error occurred. If an error occurs, @skb is
  256. * left unmodified.
  257. *
  258. * Must be called with RTNL lock or rcu_read_lock.
  259. */
  260. int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
  261. {
  262. struct nlattr *nla;
  263. nla = nla_nest_start(skb, OVS_VPORT_ATTR_OPTIONS);
  264. if (!nla)
  265. return -EMSGSIZE;
  266. if (vport->ops->get_options) {
  267. int err = vport->ops->get_options(vport, skb);
  268. if (err) {
  269. nla_nest_cancel(skb, nla);
  270. return err;
  271. }
  272. }
  273. nla_nest_end(skb, nla);
  274. return 0;
  275. }
  276. /**
  277. * ovs_vport_receive - pass up received packet to the datapath for processing
  278. *
  279. * @vport: vport that received the packet
  280. * @skb: skb that was received
  281. *
  282. * Must be called with rcu_read_lock. The packet cannot be shared and
  283. * skb->data should point to the Ethernet header. The caller must have already
  284. * called compute_ip_summed() to initialize the checksumming fields.
  285. */
  286. void ovs_vport_receive(struct vport *vport, struct sk_buff *skb)
  287. {
  288. struct vport_percpu_stats *stats;
  289. stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());
  290. u64_stats_update_begin(&stats->sync);
  291. stats->rx_packets++;
  292. stats->rx_bytes += skb->len;
  293. u64_stats_update_end(&stats->sync);
  294. ovs_dp_process_received_packet(vport, skb);
  295. }
  296. /**
  297. * ovs_vport_send - send a packet on a device
  298. *
  299. * @vport: vport on which to send the packet
  300. * @skb: skb to send
  301. *
  302. * Sends the given packet and returns the length of data sent. Either RTNL
  303. * lock or rcu_read_lock must be held.
  304. */
  305. int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
  306. {
  307. int sent = vport->ops->send(vport, skb);
  308. if (likely(sent)) {
  309. struct vport_percpu_stats *stats;
  310. stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());
  311. u64_stats_update_begin(&stats->sync);
  312. stats->tx_packets++;
  313. stats->tx_bytes += sent;
  314. u64_stats_update_end(&stats->sync);
  315. }
  316. return sent;
  317. }
  318. /**
  319. * ovs_vport_record_error - indicate device error to generic stats layer
  320. *
  321. * @vport: vport that encountered the error
  322. * @err_type: one of enum vport_err_type types to indicate the error type
  323. *
  324. * If using the vport generic stats layer indicate that an error of the given
  325. * type has occured.
  326. */
  327. void ovs_vport_record_error(struct vport *vport, enum vport_err_type err_type)
  328. {
  329. spin_lock(&vport->stats_lock);
  330. switch (err_type) {
  331. case VPORT_E_RX_DROPPED:
  332. vport->err_stats.rx_dropped++;
  333. break;
  334. case VPORT_E_RX_ERROR:
  335. vport->err_stats.rx_errors++;
  336. break;
  337. case VPORT_E_TX_DROPPED:
  338. vport->err_stats.tx_dropped++;
  339. break;
  340. case VPORT_E_TX_ERROR:
  341. vport->err_stats.tx_errors++;
  342. break;
  343. };
  344. spin_unlock(&vport->stats_lock);
  345. }