/* device.c */
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
  33. #include <linux/module.h>
  34. #include <linux/string.h>
  35. #include <linux/errno.h>
  36. #include <linux/kernel.h>
  37. #include <linux/slab.h>
  38. #include <linux/init.h>
  39. #include <linux/mutex.h>
  40. #include <rdma/rdma_netlink.h>
  41. #include "core_priv.h"
  42. MODULE_AUTHOR("Roland Dreier");
  43. MODULE_DESCRIPTION("core kernel InfiniBand API");
  44. MODULE_LICENSE("Dual BSD/GPL");
  45. struct ib_client_data {
  46. struct list_head list;
  47. struct ib_client *client;
  48. void * data;
  49. };
  50. struct workqueue_struct *ib_wq;
  51. EXPORT_SYMBOL_GPL(ib_wq);
  52. static LIST_HEAD(device_list);
  53. static LIST_HEAD(client_list);
  54. /*
  55. * device_mutex protects access to both device_list and client_list.
  56. * There's no real point to using multiple locks or something fancier
  57. * like an rwsem: we always access both lists, and we're always
  58. * modifying one list or the other list. In any case this is not a
  59. * hot path so there's no point in trying to optimize.
  60. */
  61. static DEFINE_MUTEX(device_mutex);
  62. static int ib_device_check_mandatory(struct ib_device *device)
  63. {
  64. #define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
  65. static const struct {
  66. size_t offset;
  67. char *name;
  68. } mandatory_table[] = {
  69. IB_MANDATORY_FUNC(query_device),
  70. IB_MANDATORY_FUNC(query_port),
  71. IB_MANDATORY_FUNC(query_pkey),
  72. IB_MANDATORY_FUNC(query_gid),
  73. IB_MANDATORY_FUNC(alloc_pd),
  74. IB_MANDATORY_FUNC(dealloc_pd),
  75. IB_MANDATORY_FUNC(create_ah),
  76. IB_MANDATORY_FUNC(destroy_ah),
  77. IB_MANDATORY_FUNC(create_qp),
  78. IB_MANDATORY_FUNC(modify_qp),
  79. IB_MANDATORY_FUNC(destroy_qp),
  80. IB_MANDATORY_FUNC(post_send),
  81. IB_MANDATORY_FUNC(post_recv),
  82. IB_MANDATORY_FUNC(create_cq),
  83. IB_MANDATORY_FUNC(destroy_cq),
  84. IB_MANDATORY_FUNC(poll_cq),
  85. IB_MANDATORY_FUNC(req_notify_cq),
  86. IB_MANDATORY_FUNC(get_dma_mr),
  87. IB_MANDATORY_FUNC(dereg_mr)
  88. };
  89. int i;
  90. for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
  91. if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
  92. printk(KERN_WARNING "Device %s is missing mandatory function %s\n",
  93. device->name, mandatory_table[i].name);
  94. return -EINVAL;
  95. }
  96. }
  97. return 0;
  98. }
  99. static struct ib_device *__ib_device_get_by_name(const char *name)
  100. {
  101. struct ib_device *device;
  102. list_for_each_entry(device, &device_list, core_list)
  103. if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
  104. return device;
  105. return NULL;
  106. }
  107. static int alloc_name(char *name)
  108. {
  109. unsigned long *inuse;
  110. char buf[IB_DEVICE_NAME_MAX];
  111. struct ib_device *device;
  112. int i;
  113. inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
  114. if (!inuse)
  115. return -ENOMEM;
  116. list_for_each_entry(device, &device_list, core_list) {
  117. if (!sscanf(device->name, name, &i))
  118. continue;
  119. if (i < 0 || i >= PAGE_SIZE * 8)
  120. continue;
  121. snprintf(buf, sizeof buf, name, i);
  122. if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
  123. set_bit(i, inuse);
  124. }
  125. i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
  126. free_page((unsigned long) inuse);
  127. snprintf(buf, sizeof buf, name, i);
  128. if (__ib_device_get_by_name(buf))
  129. return -ENFILE;
  130. strlcpy(name, buf, IB_DEVICE_NAME_MAX);
  131. return 0;
  132. }
  133. static int start_port(struct ib_device *device)
  134. {
  135. return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
  136. }
  137. static int end_port(struct ib_device *device)
  138. {
  139. return (device->node_type == RDMA_NODE_IB_SWITCH) ?
  140. 0 : device->phys_port_cnt;
  141. }
  142. /**
  143. * ib_alloc_device - allocate an IB device struct
  144. * @size:size of structure to allocate
  145. *
  146. * Low-level drivers should use ib_alloc_device() to allocate &struct
  147. * ib_device. @size is the size of the structure to be allocated,
  148. * including any private data used by the low-level driver.
  149. * ib_dealloc_device() must be used to free structures allocated with
  150. * ib_alloc_device().
  151. */
  152. struct ib_device *ib_alloc_device(size_t size)
  153. {
  154. BUG_ON(size < sizeof (struct ib_device));
  155. return kzalloc(size, GFP_KERNEL);
  156. }
  157. EXPORT_SYMBOL(ib_alloc_device);
  158. /**
  159. * ib_dealloc_device - free an IB device struct
  160. * @device:structure to free
  161. *
  162. * Free a structure allocated with ib_alloc_device().
  163. */
  164. void ib_dealloc_device(struct ib_device *device)
  165. {
  166. if (device->reg_state == IB_DEV_UNINITIALIZED) {
  167. kfree(device);
  168. return;
  169. }
  170. BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);
  171. kobject_put(&device->dev.kobj);
  172. }
  173. EXPORT_SYMBOL(ib_dealloc_device);
  174. static int add_client_context(struct ib_device *device, struct ib_client *client)
  175. {
  176. struct ib_client_data *context;
  177. unsigned long flags;
  178. context = kmalloc(sizeof *context, GFP_KERNEL);
  179. if (!context) {
  180. printk(KERN_WARNING "Couldn't allocate client context for %s/%s\n",
  181. device->name, client->name);
  182. return -ENOMEM;
  183. }
  184. context->client = client;
  185. context->data = NULL;
  186. spin_lock_irqsave(&device->client_data_lock, flags);
  187. list_add(&context->list, &device->client_data_list);
  188. spin_unlock_irqrestore(&device->client_data_lock, flags);
  189. return 0;
  190. }
  191. static int read_port_table_lengths(struct ib_device *device)
  192. {
  193. struct ib_port_attr *tprops = NULL;
  194. int num_ports, ret = -ENOMEM;
  195. u8 port_index;
  196. tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
  197. if (!tprops)
  198. goto out;
  199. num_ports = end_port(device) - start_port(device) + 1;
  200. device->pkey_tbl_len = kmalloc(sizeof *device->pkey_tbl_len * num_ports,
  201. GFP_KERNEL);
  202. device->gid_tbl_len = kmalloc(sizeof *device->gid_tbl_len * num_ports,
  203. GFP_KERNEL);
  204. if (!device->pkey_tbl_len || !device->gid_tbl_len)
  205. goto err;
  206. for (port_index = 0; port_index < num_ports; ++port_index) {
  207. ret = ib_query_port(device, port_index + start_port(device),
  208. tprops);
  209. if (ret)
  210. goto err;
  211. device->pkey_tbl_len[port_index] = tprops->pkey_tbl_len;
  212. device->gid_tbl_len[port_index] = tprops->gid_tbl_len;
  213. }
  214. ret = 0;
  215. goto out;
  216. err:
  217. kfree(device->gid_tbl_len);
  218. kfree(device->pkey_tbl_len);
  219. out:
  220. kfree(tprops);
  221. return ret;
  222. }
  223. /**
  224. * ib_register_device - Register an IB device with IB core
  225. * @device:Device to register
  226. *
  227. * Low-level drivers use ib_register_device() to register their
  228. * devices with the IB core. All registered clients will receive a
  229. * callback for each device that is added. @device must be allocated
  230. * with ib_alloc_device().
  231. */
  232. int ib_register_device(struct ib_device *device,
  233. int (*port_callback)(struct ib_device *,
  234. u8, struct kobject *))
  235. {
  236. int ret;
  237. mutex_lock(&device_mutex);
  238. if (strchr(device->name, '%')) {
  239. ret = alloc_name(device->name);
  240. if (ret)
  241. goto out;
  242. }
  243. if (ib_device_check_mandatory(device)) {
  244. ret = -EINVAL;
  245. goto out;
  246. }
  247. INIT_LIST_HEAD(&device->event_handler_list);
  248. INIT_LIST_HEAD(&device->client_data_list);
  249. spin_lock_init(&device->event_handler_lock);
  250. spin_lock_init(&device->client_data_lock);
  251. ret = read_port_table_lengths(device);
  252. if (ret) {
  253. printk(KERN_WARNING "Couldn't create table lengths cache for device %s\n",
  254. device->name);
  255. goto out;
  256. }
  257. ret = ib_device_register_sysfs(device, port_callback);
  258. if (ret) {
  259. printk(KERN_WARNING "Couldn't register device %s with driver model\n",
  260. device->name);
  261. kfree(device->gid_tbl_len);
  262. kfree(device->pkey_tbl_len);
  263. goto out;
  264. }
  265. list_add_tail(&device->core_list, &device_list);
  266. device->reg_state = IB_DEV_REGISTERED;
  267. {
  268. struct ib_client *client;
  269. list_for_each_entry(client, &client_list, list)
  270. if (client->add && !add_client_context(device, client))
  271. client->add(device);
  272. }
  273. out:
  274. mutex_unlock(&device_mutex);
  275. return ret;
  276. }
  277. EXPORT_SYMBOL(ib_register_device);
  278. /**
  279. * ib_unregister_device - Unregister an IB device
  280. * @device:Device to unregister
  281. *
  282. * Unregister an IB device. All clients will receive a remove callback.
  283. */
  284. void ib_unregister_device(struct ib_device *device)
  285. {
  286. struct ib_client *client;
  287. struct ib_client_data *context, *tmp;
  288. unsigned long flags;
  289. mutex_lock(&device_mutex);
  290. list_for_each_entry_reverse(client, &client_list, list)
  291. if (client->remove)
  292. client->remove(device);
  293. list_del(&device->core_list);
  294. kfree(device->gid_tbl_len);
  295. kfree(device->pkey_tbl_len);
  296. mutex_unlock(&device_mutex);
  297. ib_device_unregister_sysfs(device);
  298. spin_lock_irqsave(&device->client_data_lock, flags);
  299. list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
  300. kfree(context);
  301. spin_unlock_irqrestore(&device->client_data_lock, flags);
  302. device->reg_state = IB_DEV_UNREGISTERED;
  303. }
  304. EXPORT_SYMBOL(ib_unregister_device);
  305. /**
  306. * ib_register_client - Register an IB client
  307. * @client:Client to register
  308. *
  309. * Upper level users of the IB drivers can use ib_register_client() to
  310. * register callbacks for IB device addition and removal. When an IB
  311. * device is added, each registered client's add method will be called
  312. * (in the order the clients were registered), and when a device is
  313. * removed, each client's remove method will be called (in the reverse
  314. * order that clients were registered). In addition, when
  315. * ib_register_client() is called, the client will receive an add
  316. * callback for all devices already registered.
  317. */
  318. int ib_register_client(struct ib_client *client)
  319. {
  320. struct ib_device *device;
  321. mutex_lock(&device_mutex);
  322. list_add_tail(&client->list, &client_list);
  323. list_for_each_entry(device, &device_list, core_list)
  324. if (client->add && !add_client_context(device, client))
  325. client->add(device);
  326. mutex_unlock(&device_mutex);
  327. return 0;
  328. }
  329. EXPORT_SYMBOL(ib_register_client);
  330. /**
  331. * ib_unregister_client - Unregister an IB client
  332. * @client:Client to unregister
  333. *
  334. * Upper level users use ib_unregister_client() to remove their client
  335. * registration. When ib_unregister_client() is called, the client
  336. * will receive a remove callback for each IB device still registered.
  337. */
  338. void ib_unregister_client(struct ib_client *client)
  339. {
  340. struct ib_client_data *context, *tmp;
  341. struct ib_device *device;
  342. unsigned long flags;
  343. mutex_lock(&device_mutex);
  344. list_for_each_entry(device, &device_list, core_list) {
  345. if (client->remove)
  346. client->remove(device);
  347. spin_lock_irqsave(&device->client_data_lock, flags);
  348. list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
  349. if (context->client == client) {
  350. list_del(&context->list);
  351. kfree(context);
  352. }
  353. spin_unlock_irqrestore(&device->client_data_lock, flags);
  354. }
  355. list_del(&client->list);
  356. mutex_unlock(&device_mutex);
  357. }
  358. EXPORT_SYMBOL(ib_unregister_client);
  359. /**
  360. * ib_get_client_data - Get IB client context
  361. * @device:Device to get context for
  362. * @client:Client to get context for
  363. *
  364. * ib_get_client_data() returns client context set with
  365. * ib_set_client_data().
  366. */
  367. void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
  368. {
  369. struct ib_client_data *context;
  370. void *ret = NULL;
  371. unsigned long flags;
  372. spin_lock_irqsave(&device->client_data_lock, flags);
  373. list_for_each_entry(context, &device->client_data_list, list)
  374. if (context->client == client) {
  375. ret = context->data;
  376. break;
  377. }
  378. spin_unlock_irqrestore(&device->client_data_lock, flags);
  379. return ret;
  380. }
  381. EXPORT_SYMBOL(ib_get_client_data);
  382. /**
  383. * ib_set_client_data - Set IB client context
  384. * @device:Device to set context for
  385. * @client:Client to set context for
  386. * @data:Context to set
  387. *
  388. * ib_set_client_data() sets client context that can be retrieved with
  389. * ib_get_client_data().
  390. */
  391. void ib_set_client_data(struct ib_device *device, struct ib_client *client,
  392. void *data)
  393. {
  394. struct ib_client_data *context;
  395. unsigned long flags;
  396. spin_lock_irqsave(&device->client_data_lock, flags);
  397. list_for_each_entry(context, &device->client_data_list, list)
  398. if (context->client == client) {
  399. context->data = data;
  400. goto out;
  401. }
  402. printk(KERN_WARNING "No client context found for %s/%s\n",
  403. device->name, client->name);
  404. out:
  405. spin_unlock_irqrestore(&device->client_data_lock, flags);
  406. }
  407. EXPORT_SYMBOL(ib_set_client_data);
  408. /**
  409. * ib_register_event_handler - Register an IB event handler
  410. * @event_handler:Handler to register
  411. *
  412. * ib_register_event_handler() registers an event handler that will be
  413. * called back when asynchronous IB events occur (as defined in
  414. * chapter 11 of the InfiniBand Architecture Specification). This
  415. * callback may occur in interrupt context.
  416. */
  417. int ib_register_event_handler (struct ib_event_handler *event_handler)
  418. {
  419. unsigned long flags;
  420. spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
  421. list_add_tail(&event_handler->list,
  422. &event_handler->device->event_handler_list);
  423. spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
  424. return 0;
  425. }
  426. EXPORT_SYMBOL(ib_register_event_handler);
  427. /**
  428. * ib_unregister_event_handler - Unregister an event handler
  429. * @event_handler:Handler to unregister
  430. *
  431. * Unregister an event handler registered with
  432. * ib_register_event_handler().
  433. */
  434. int ib_unregister_event_handler(struct ib_event_handler *event_handler)
  435. {
  436. unsigned long flags;
  437. spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
  438. list_del(&event_handler->list);
  439. spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
  440. return 0;
  441. }
  442. EXPORT_SYMBOL(ib_unregister_event_handler);
  443. /**
  444. * ib_dispatch_event - Dispatch an asynchronous event
  445. * @event:Event to dispatch
  446. *
  447. * Low-level drivers must call ib_dispatch_event() to dispatch the
  448. * event to all registered event handlers when an asynchronous event
  449. * occurs.
  450. */
  451. void ib_dispatch_event(struct ib_event *event)
  452. {
  453. unsigned long flags;
  454. struct ib_event_handler *handler;
  455. spin_lock_irqsave(&event->device->event_handler_lock, flags);
  456. list_for_each_entry(handler, &event->device->event_handler_list, list)
  457. handler->handler(handler, event);
  458. spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
  459. }
  460. EXPORT_SYMBOL(ib_dispatch_event);
  461. /**
  462. * ib_query_device - Query IB device attributes
  463. * @device:Device to query
  464. * @device_attr:Device attributes
  465. *
  466. * ib_query_device() returns the attributes of a device through the
  467. * @device_attr pointer.
  468. */
  469. int ib_query_device(struct ib_device *device,
  470. struct ib_device_attr *device_attr)
  471. {
  472. return device->query_device(device, device_attr);
  473. }
  474. EXPORT_SYMBOL(ib_query_device);
  475. /**
  476. * ib_query_port - Query IB port attributes
  477. * @device:Device to query
  478. * @port_num:Port number to query
  479. * @port_attr:Port attributes
  480. *
  481. * ib_query_port() returns the attributes of a port through the
  482. * @port_attr pointer.
  483. */
  484. int ib_query_port(struct ib_device *device,
  485. u8 port_num,
  486. struct ib_port_attr *port_attr)
  487. {
  488. if (port_num < start_port(device) || port_num > end_port(device))
  489. return -EINVAL;
  490. return device->query_port(device, port_num, port_attr);
  491. }
  492. EXPORT_SYMBOL(ib_query_port);
  493. /**
  494. * ib_query_gid - Get GID table entry
  495. * @device:Device to query
  496. * @port_num:Port number to query
  497. * @index:GID table index to query
  498. * @gid:Returned GID
  499. *
  500. * ib_query_gid() fetches the specified GID table entry.
  501. */
  502. int ib_query_gid(struct ib_device *device,
  503. u8 port_num, int index, union ib_gid *gid)
  504. {
  505. return device->query_gid(device, port_num, index, gid);
  506. }
  507. EXPORT_SYMBOL(ib_query_gid);
  508. /**
  509. * ib_query_pkey - Get P_Key table entry
  510. * @device:Device to query
  511. * @port_num:Port number to query
  512. * @index:P_Key table index to query
  513. * @pkey:Returned P_Key
  514. *
  515. * ib_query_pkey() fetches the specified P_Key table entry.
  516. */
  517. int ib_query_pkey(struct ib_device *device,
  518. u8 port_num, u16 index, u16 *pkey)
  519. {
  520. return device->query_pkey(device, port_num, index, pkey);
  521. }
  522. EXPORT_SYMBOL(ib_query_pkey);
  523. /**
  524. * ib_modify_device - Change IB device attributes
  525. * @device:Device to modify
  526. * @device_modify_mask:Mask of attributes to change
  527. * @device_modify:New attribute values
  528. *
  529. * ib_modify_device() changes a device's attributes as specified by
  530. * the @device_modify_mask and @device_modify structure.
  531. */
  532. int ib_modify_device(struct ib_device *device,
  533. int device_modify_mask,
  534. struct ib_device_modify *device_modify)
  535. {
  536. if (!device->modify_device)
  537. return -ENOSYS;
  538. return device->modify_device(device, device_modify_mask,
  539. device_modify);
  540. }
  541. EXPORT_SYMBOL(ib_modify_device);
  542. /**
  543. * ib_modify_port - Modifies the attributes for the specified port.
  544. * @device: The device to modify.
  545. * @port_num: The number of the port to modify.
  546. * @port_modify_mask: Mask used to specify which attributes of the port
  547. * to change.
  548. * @port_modify: New attribute values for the port.
  549. *
  550. * ib_modify_port() changes a port's attributes as specified by the
  551. * @port_modify_mask and @port_modify structure.
  552. */
  553. int ib_modify_port(struct ib_device *device,
  554. u8 port_num, int port_modify_mask,
  555. struct ib_port_modify *port_modify)
  556. {
  557. if (!device->modify_port)
  558. return -ENOSYS;
  559. if (port_num < start_port(device) || port_num > end_port(device))
  560. return -EINVAL;
  561. return device->modify_port(device, port_num, port_modify_mask,
  562. port_modify);
  563. }
  564. EXPORT_SYMBOL(ib_modify_port);
  565. /**
  566. * ib_find_gid - Returns the port number and GID table index where
  567. * a specified GID value occurs.
  568. * @device: The device to query.
  569. * @gid: The GID value to search for.
  570. * @port_num: The port number of the device where the GID value was found.
  571. * @index: The index into the GID table where the GID was found. This
  572. * parameter may be NULL.
  573. */
  574. int ib_find_gid(struct ib_device *device, union ib_gid *gid,
  575. u8 *port_num, u16 *index)
  576. {
  577. union ib_gid tmp_gid;
  578. int ret, port, i;
  579. for (port = start_port(device); port <= end_port(device); ++port) {
  580. for (i = 0; i < device->gid_tbl_len[port - start_port(device)]; ++i) {
  581. ret = ib_query_gid(device, port, i, &tmp_gid);
  582. if (ret)
  583. return ret;
  584. if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
  585. *port_num = port;
  586. if (index)
  587. *index = i;
  588. return 0;
  589. }
  590. }
  591. }
  592. return -ENOENT;
  593. }
  594. EXPORT_SYMBOL(ib_find_gid);
  595. /**
  596. * ib_find_pkey - Returns the PKey table index where a specified
  597. * PKey value occurs.
  598. * @device: The device to query.
  599. * @port_num: The port number of the device to search for the PKey.
  600. * @pkey: The PKey value to search for.
  601. * @index: The index into the PKey table where the PKey was found.
  602. */
  603. int ib_find_pkey(struct ib_device *device,
  604. u8 port_num, u16 pkey, u16 *index)
  605. {
  606. int ret, i;
  607. u16 tmp_pkey;
  608. for (i = 0; i < device->pkey_tbl_len[port_num - start_port(device)]; ++i) {
  609. ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
  610. if (ret)
  611. return ret;
  612. if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
  613. *index = i;
  614. return 0;
  615. }
  616. }
  617. return -ENOENT;
  618. }
  619. EXPORT_SYMBOL(ib_find_pkey);
  620. static int __init ib_core_init(void)
  621. {
  622. int ret;
  623. ib_wq = alloc_workqueue("infiniband", 0, 0);
  624. if (!ib_wq)
  625. return -ENOMEM;
  626. ret = ib_sysfs_setup();
  627. if (ret) {
  628. printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
  629. goto err;
  630. }
  631. ret = ibnl_init();
  632. if (ret) {
  633. printk(KERN_WARNING "Couldn't init IB netlink interface\n");
  634. goto err_sysfs;
  635. }
  636. ret = ib_cache_setup();
  637. if (ret) {
  638. printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
  639. goto err_nl;
  640. }
  641. return 0;
  642. err_nl:
  643. ibnl_cleanup();
  644. err_sysfs:
  645. ib_sysfs_cleanup();
  646. err:
  647. destroy_workqueue(ib_wq);
  648. return ret;
  649. }
  650. static void __exit ib_core_cleanup(void)
  651. {
  652. ib_cache_cleanup();
  653. ibnl_cleanup();
  654. ib_sysfs_cleanup();
  655. /* Make sure that any pending umem accounting work is done. */
  656. destroy_workqueue(ib_wq);
  657. }
  658. module_init(ib_core_init);
  659. module_exit(ib_core_cleanup);