fw-management.c

/*
 * Greybus Firmware Management Protocol Driver.
 *
 * Copyright 2016 Google Inc.
 * Copyright 2016 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/cdev.h>
#include <linux/completion.h>
#include <linux/firmware.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>

#include "firmware.h"
#include "greybus_firmware.h"
#include "greybus.h"

#define FW_MGMT_TIMEOUT_MS	1000

struct fw_mgmt {
	struct device		*parent;
	struct gb_connection	*connection;
	struct kref		kref;
	struct list_head	node;

	/* Common id-map for interface and backend firmware requests */
	struct ida		id_map;
	struct mutex		mutex;
	struct completion	completion;
	struct cdev		cdev;
	struct device		*class_device;
	dev_t			dev_num;
	unsigned int		timeout_jiffies;
	bool			disabled; /* connection getting disabled */

	/* Interface Firmware specific fields */
	bool			mode_switch_started;
	bool			intf_fw_loaded;
	u8			intf_fw_request_id;
	u8			intf_fw_status;
	u16			intf_fw_major;
	u16			intf_fw_minor;

	/* Backend Firmware specific fields */
	u8			backend_fw_request_id;
	u8			backend_fw_status;
};

/*
 * Number of minor devices this driver supports.
 * There will be exactly one required per Interface.
 */
#define NUM_MINORS		U8_MAX

static struct class *fw_mgmt_class;
static dev_t fw_mgmt_dev_num;
static DEFINE_IDA(fw_mgmt_minors_map);
static LIST_HEAD(fw_mgmt_list);
static DEFINE_MUTEX(list_mutex);

static void fw_mgmt_kref_release(struct kref *kref)
{
	struct fw_mgmt *fw_mgmt = container_of(kref, struct fw_mgmt, kref);

	ida_destroy(&fw_mgmt->id_map);
	kfree(fw_mgmt);
}

/*
 * All users of fw_mgmt take a reference (from within list_mutex lock), before
 * they get a pointer to play with. And the structure will be freed only after
 * the last user has put the reference to it.
 */
static void put_fw_mgmt(struct fw_mgmt *fw_mgmt)
{
	kref_put(&fw_mgmt->kref, fw_mgmt_kref_release);
}

/* Caller must call put_fw_mgmt() after using struct fw_mgmt */
static struct fw_mgmt *get_fw_mgmt(struct cdev *cdev)
{
	struct fw_mgmt *fw_mgmt;

	mutex_lock(&list_mutex);
	list_for_each_entry(fw_mgmt, &fw_mgmt_list, node) {
		if (&fw_mgmt->cdev == cdev) {
			kref_get(&fw_mgmt->kref);
			goto unlock;
		}
	}

	fw_mgmt = NULL;
unlock:
	mutex_unlock(&list_mutex);

	return fw_mgmt;
}

static int fw_mgmt_interface_fw_version_operation(struct fw_mgmt *fw_mgmt,
				struct fw_mgmt_ioc_get_intf_version *fw_info)
{
	struct gb_connection *connection = fw_mgmt->connection;
	struct gb_fw_mgmt_interface_fw_version_response response;
	int ret;

	ret = gb_operation_sync(connection,
				GB_FW_MGMT_TYPE_INTERFACE_FW_VERSION, NULL, 0,
				&response, sizeof(response));
	if (ret) {
		dev_err(fw_mgmt->parent,
			"failed to get interface firmware version (%d)\n", ret);
		return ret;
	}

	fw_info->major = le16_to_cpu(response.major);
	fw_info->minor = le16_to_cpu(response.minor);

	strncpy(fw_info->firmware_tag, response.firmware_tag,
		GB_FIRMWARE_TAG_MAX_SIZE);

	/*
	 * The firmware-tag should be NULL terminated, otherwise throw error but
	 * don't fail.
	 */
	if (fw_info->firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') {
		dev_err(fw_mgmt->parent,
			"fw-version: firmware-tag is not NULL terminated\n");
		fw_info->firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] = '\0';
	}

	return 0;
}

static int fw_mgmt_load_and_validate_operation(struct fw_mgmt *fw_mgmt,
					       u8 load_method, const char *tag)
{
	struct gb_fw_mgmt_load_and_validate_fw_request request;
	int ret;

	if (load_method != GB_FW_LOAD_METHOD_UNIPRO &&
	    load_method != GB_FW_LOAD_METHOD_INTERNAL) {
		dev_err(fw_mgmt->parent,
			"invalid load-method (%d)\n", load_method);
		return -EINVAL;
	}

	request.load_method = load_method;
	strncpy(request.firmware_tag, tag, GB_FIRMWARE_TAG_MAX_SIZE);

	/*
	 * The firmware-tag should be NULL terminated, otherwise throw error and
	 * fail.
	 */
	if (request.firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') {
		dev_err(fw_mgmt->parent, "load-and-validate: firmware-tag is not NULL terminated\n");
		return -EINVAL;
	}

	/* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */
	ret = ida_simple_get(&fw_mgmt->id_map, 1, 256, GFP_KERNEL);
	if (ret < 0) {
		dev_err(fw_mgmt->parent, "failed to allocate request id (%d)\n",
			ret);
		return ret;
	}

	fw_mgmt->intf_fw_request_id = ret;
	fw_mgmt->intf_fw_loaded = false;
	request.request_id = ret;

	ret = gb_operation_sync(fw_mgmt->connection,
				GB_FW_MGMT_TYPE_LOAD_AND_VALIDATE_FW, &request,
				sizeof(request), NULL, 0);
	if (ret) {
		ida_simple_remove(&fw_mgmt->id_map,
				  fw_mgmt->intf_fw_request_id);
		fw_mgmt->intf_fw_request_id = 0;
		dev_err(fw_mgmt->parent,
			"load and validate firmware request failed (%d)\n",
			ret);
		return ret;
	}

	return 0;
}

static int fw_mgmt_interface_fw_loaded_operation(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct fw_mgmt *fw_mgmt = gb_connection_get_data(connection);
	struct gb_fw_mgmt_loaded_fw_request *request;

	/* No pending load and validate request ? */
	if (!fw_mgmt->intf_fw_request_id) {
		dev_err(fw_mgmt->parent,
			"unexpected firmware loaded request received\n");
		return -ENODEV;
	}

	if (op->request->payload_size != sizeof(*request)) {
		dev_err(fw_mgmt->parent, "illegal size of firmware loaded request (%zu != %zu)\n",
			op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	/* Invalid request-id ? */
	if (request->request_id != fw_mgmt->intf_fw_request_id) {
		dev_err(fw_mgmt->parent, "invalid request id for firmware loaded request (%02u != %02u)\n",
			fw_mgmt->intf_fw_request_id, request->request_id);
		return -ENODEV;
	}

	ida_simple_remove(&fw_mgmt->id_map, fw_mgmt->intf_fw_request_id);
	fw_mgmt->intf_fw_request_id = 0;
	fw_mgmt->intf_fw_status = request->status;
	fw_mgmt->intf_fw_major = le16_to_cpu(request->major);
	fw_mgmt->intf_fw_minor = le16_to_cpu(request->minor);

	if (fw_mgmt->intf_fw_status == GB_FW_LOAD_STATUS_FAILED)
		dev_err(fw_mgmt->parent,
			"failed to load interface firmware, status:%02x\n",
			fw_mgmt->intf_fw_status);
	else if (fw_mgmt->intf_fw_status == GB_FW_LOAD_STATUS_VALIDATION_FAILED)
		dev_err(fw_mgmt->parent,
			"failed to validate interface firmware, status:%02x\n",
			fw_mgmt->intf_fw_status);
	else
		fw_mgmt->intf_fw_loaded = true;

	complete(&fw_mgmt->completion);

	return 0;
}

static int fw_mgmt_backend_fw_version_operation(struct fw_mgmt *fw_mgmt,
				struct fw_mgmt_ioc_get_backend_version *fw_info)
{
	struct gb_connection *connection = fw_mgmt->connection;
	struct gb_fw_mgmt_backend_fw_version_request request;
	struct gb_fw_mgmt_backend_fw_version_response response;
	int ret;

	strncpy(request.firmware_tag, fw_info->firmware_tag,
		GB_FIRMWARE_TAG_MAX_SIZE);

	/*
	 * The firmware-tag should be NULL terminated, otherwise throw error and
	 * fail.
	 */
	if (request.firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') {
		dev_err(fw_mgmt->parent, "backend-version: firmware-tag is not NULL terminated\n");
		return -EINVAL;
	}

	ret = gb_operation_sync(connection,
				GB_FW_MGMT_TYPE_BACKEND_FW_VERSION, &request,
				sizeof(request), &response, sizeof(response));
	if (ret) {
		dev_err(fw_mgmt->parent, "failed to get version of %s backend firmware (%d)\n",
			fw_info->firmware_tag, ret);
		return ret;
	}

	fw_info->status = response.status;

	/* Reset version as that should be non-zero only for success case */
	fw_info->major = 0;
	fw_info->minor = 0;

	switch (fw_info->status) {
	case GB_FW_BACKEND_VERSION_STATUS_SUCCESS:
		fw_info->major = le16_to_cpu(response.major);
		fw_info->minor = le16_to_cpu(response.minor);
		break;
	case GB_FW_BACKEND_VERSION_STATUS_NOT_AVAILABLE:
	case GB_FW_BACKEND_VERSION_STATUS_RETRY:
		break;
	case GB_FW_BACKEND_VERSION_STATUS_NOT_SUPPORTED:
		dev_err(fw_mgmt->parent,
			"Firmware with tag %s is not supported by Interface\n",
			fw_info->firmware_tag);
		break;
	default:
		dev_err(fw_mgmt->parent, "Invalid status received: %u\n",
			fw_info->status);
	}

	return 0;
}

static int fw_mgmt_backend_fw_update_operation(struct fw_mgmt *fw_mgmt,
					       char *tag)
{
	struct gb_fw_mgmt_backend_fw_update_request request;
	int ret;

	strncpy(request.firmware_tag, tag, GB_FIRMWARE_TAG_MAX_SIZE);

	/*
	 * The firmware-tag should be NULL terminated, otherwise throw error and
	 * fail.
	 */
	if (request.firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') {
		dev_err(fw_mgmt->parent, "backend-update: firmware-tag is not NULL terminated\n");
		return -EINVAL;
	}

	/* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */
	ret = ida_simple_get(&fw_mgmt->id_map, 1, 256, GFP_KERNEL);
	if (ret < 0) {
		dev_err(fw_mgmt->parent, "failed to allocate request id (%d)\n",
			ret);
		return ret;
	}

	fw_mgmt->backend_fw_request_id = ret;
	request.request_id = ret;

	ret = gb_operation_sync(fw_mgmt->connection,
				GB_FW_MGMT_TYPE_BACKEND_FW_UPDATE, &request,
				sizeof(request), NULL, 0);
	if (ret) {
		ida_simple_remove(&fw_mgmt->id_map,
				  fw_mgmt->backend_fw_request_id);
		fw_mgmt->backend_fw_request_id = 0;
		dev_err(fw_mgmt->parent,
			"backend %s firmware update request failed (%d)\n", tag,
			ret);
		return ret;
	}

	return 0;
}

static int fw_mgmt_backend_fw_updated_operation(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct fw_mgmt *fw_mgmt = gb_connection_get_data(connection);
	struct gb_fw_mgmt_backend_fw_updated_request *request;

	/* No pending backend firmware update request ? */
	if (!fw_mgmt->backend_fw_request_id) {
		dev_err(fw_mgmt->parent, "unexpected backend firmware updated request received\n");
		return -ENODEV;
	}

	if (op->request->payload_size != sizeof(*request)) {
		dev_err(fw_mgmt->parent, "illegal size of backend firmware updated request (%zu != %zu)\n",
			op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	/* Invalid request-id ? */
	if (request->request_id != fw_mgmt->backend_fw_request_id) {
		dev_err(fw_mgmt->parent, "invalid request id for backend firmware updated request (%02u != %02u)\n",
			fw_mgmt->backend_fw_request_id, request->request_id);
		return -ENODEV;
	}

	ida_simple_remove(&fw_mgmt->id_map, fw_mgmt->backend_fw_request_id);
	fw_mgmt->backend_fw_request_id = 0;
	fw_mgmt->backend_fw_status = request->status;

	if ((fw_mgmt->backend_fw_status != GB_FW_BACKEND_FW_STATUS_SUCCESS) &&
	    (fw_mgmt->backend_fw_status != GB_FW_BACKEND_FW_STATUS_RETRY))
		dev_err(fw_mgmt->parent,
			"failed to load backend firmware: %02x\n",
			fw_mgmt->backend_fw_status);

	complete(&fw_mgmt->completion);

	return 0;
}

/* Char device fops */

static int fw_mgmt_open(struct inode *inode, struct file *file)
{
	struct fw_mgmt *fw_mgmt = get_fw_mgmt(inode->i_cdev);

	/* fw_mgmt structure can't get freed until file descriptor is closed */
	if (fw_mgmt) {
		file->private_data = fw_mgmt;
		return 0;
	}

	return -ENODEV;
}

static int fw_mgmt_release(struct inode *inode, struct file *file)
{
	struct fw_mgmt *fw_mgmt = file->private_data;

	put_fw_mgmt(fw_mgmt);
	return 0;
}

static int fw_mgmt_ioctl(struct fw_mgmt *fw_mgmt, unsigned int cmd,
			 void __user *buf)
{
	struct fw_mgmt_ioc_get_intf_version intf_fw_info;
	struct fw_mgmt_ioc_get_backend_version backend_fw_info;
	struct fw_mgmt_ioc_intf_load_and_validate intf_load;
	struct fw_mgmt_ioc_backend_fw_update backend_update;
	unsigned int timeout;
	int ret;

	/* Reject any operations after mode-switch has started */
	if (fw_mgmt->mode_switch_started)
		return -EBUSY;

	switch (cmd) {
	case FW_MGMT_IOC_GET_INTF_FW:
		ret = fw_mgmt_interface_fw_version_operation(fw_mgmt,
							     &intf_fw_info);
		if (ret)
			return ret;

		if (copy_to_user(buf, &intf_fw_info, sizeof(intf_fw_info)))
			return -EFAULT;

		return 0;
	case FW_MGMT_IOC_GET_BACKEND_FW:
		if (copy_from_user(&backend_fw_info, buf,
				   sizeof(backend_fw_info)))
			return -EFAULT;

		ret = fw_mgmt_backend_fw_version_operation(fw_mgmt,
							   &backend_fw_info);
		if (ret)
			return ret;

		if (copy_to_user(buf, &backend_fw_info,
				 sizeof(backend_fw_info)))
			return -EFAULT;

		return 0;
	case FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE:
		if (copy_from_user(&intf_load, buf, sizeof(intf_load)))
			return -EFAULT;

		ret = fw_mgmt_load_and_validate_operation(fw_mgmt,
				intf_load.load_method, intf_load.firmware_tag);
		if (ret)
			return ret;

		if (!wait_for_completion_timeout(&fw_mgmt->completion,
						 fw_mgmt->timeout_jiffies)) {
			dev_err(fw_mgmt->parent, "timed out waiting for firmware load and validation to finish\n");
			return -ETIMEDOUT;
		}

		intf_load.status = fw_mgmt->intf_fw_status;
		intf_load.major = fw_mgmt->intf_fw_major;
		intf_load.minor = fw_mgmt->intf_fw_minor;

		if (copy_to_user(buf, &intf_load, sizeof(intf_load)))
			return -EFAULT;

		return 0;
	case FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE:
		if (copy_from_user(&backend_update, buf,
				   sizeof(backend_update)))
			return -EFAULT;

		ret = fw_mgmt_backend_fw_update_operation(fw_mgmt,
				backend_update.firmware_tag);
		if (ret)
			return ret;

		if (!wait_for_completion_timeout(&fw_mgmt->completion,
						 fw_mgmt->timeout_jiffies)) {
			dev_err(fw_mgmt->parent, "timed out waiting for backend firmware update to finish\n");
			return -ETIMEDOUT;
		}

		backend_update.status = fw_mgmt->backend_fw_status;
		if (copy_to_user(buf, &backend_update, sizeof(backend_update)))
			return -EFAULT;

		return 0;
	case FW_MGMT_IOC_SET_TIMEOUT_MS:
		if (get_user(timeout, (unsigned int __user *)buf))
			return -EFAULT;

		if (!timeout) {
			dev_err(fw_mgmt->parent, "timeout can't be zero\n");
			return -EINVAL;
		}

		fw_mgmt->timeout_jiffies = msecs_to_jiffies(timeout);
		return 0;
	case FW_MGMT_IOC_MODE_SWITCH:
		if (!fw_mgmt->intf_fw_loaded) {
			dev_err(fw_mgmt->parent,
				"Firmware not loaded for mode-switch\n");
			return -EPERM;
		}

		/*
		 * Disallow new ioctls as the fw-core bundle driver is going to
		 * get disconnected soon and the character device will get
		 * removed.
		 */
		fw_mgmt->mode_switch_started = true;

		ret = gb_interface_request_mode_switch(fw_mgmt->connection->intf);
		if (ret) {
			dev_err(fw_mgmt->parent, "Mode-switch failed: %d\n",
				ret);
			fw_mgmt->mode_switch_started = false;
			return ret;
		}

		return 0;
	default:
		return -ENOTTY;
	}
}

static long fw_mgmt_ioctl_unlocked(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	struct fw_mgmt *fw_mgmt = file->private_data;
	struct gb_bundle *bundle = fw_mgmt->connection->bundle;
	int ret = -ENODEV;

	/*
	 * Serialize ioctls.
	 *
	 * We don't want the user to run multiple operations in parallel, for
	 * example updating the Interface firmware twice at once for the same
	 * Interface. There is no need to parallelize these operations for
	 * speed, and serializing them keeps the code simple for now.
	 *
	 * This also protects ->disabled, which is used to check if the
	 * connection is getting disconnected, so that we don't start any new
	 * operations.
	 */
	mutex_lock(&fw_mgmt->mutex);
	if (!fw_mgmt->disabled) {
		ret = gb_pm_runtime_get_sync(bundle);
		if (!ret) {
			ret = fw_mgmt_ioctl(fw_mgmt, cmd, (void __user *)arg);
			gb_pm_runtime_put_autosuspend(bundle);
		}
	}
	mutex_unlock(&fw_mgmt->mutex);

	return ret;
}

static const struct file_operations fw_mgmt_fops = {
	.owner		= THIS_MODULE,
	.open		= fw_mgmt_open,
	.release	= fw_mgmt_release,
	.unlocked_ioctl	= fw_mgmt_ioctl_unlocked,
};
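
/*
 * Rough sketch of the userspace call sequence expected against this char
 * device, summarizing the ioctl handler above (the device name and minor
 * number are only illustrative):
 *
 *	fd = open("/dev/gb-fw-mgmt-0", O_RDWR);
 *	ioctl(fd, FW_MGMT_IOC_SET_TIMEOUT_MS, &timeout_ms);	// optional, default is FW_MGMT_TIMEOUT_MS (1000 ms)
 *	ioctl(fd, FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE, &load);	// blocks until the loaded-fw response or the timeout
 *	ioctl(fd, FW_MGMT_IOC_MODE_SWITCH);			// only allowed after a successful load
 *	close(fd);
 *
 * Once FW_MGMT_IOC_MODE_SWITCH has been issued, every further ioctl on this
 * fd is rejected with -EBUSY, as the fw-core bundle is expected to be
 * disconnected soon and the char device removed.
 */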

int gb_fw_mgmt_request_handler(struct gb_operation *op)
{
	u8 type = op->type;

	switch (type) {
	case GB_FW_MGMT_TYPE_LOADED_FW:
		return fw_mgmt_interface_fw_loaded_operation(op);
	case GB_FW_MGMT_TYPE_BACKEND_FW_UPDATED:
		return fw_mgmt_backend_fw_updated_operation(op);
	default:
		dev_err(&op->connection->bundle->dev,
			"unsupported request: %u\n", type);
		return -EINVAL;
	}
}

int gb_fw_mgmt_connection_init(struct gb_connection *connection)
{
	struct fw_mgmt *fw_mgmt;
	int ret, minor;

	if (!connection)
		return 0;

	fw_mgmt = kzalloc(sizeof(*fw_mgmt), GFP_KERNEL);
	if (!fw_mgmt)
		return -ENOMEM;

	fw_mgmt->parent = &connection->bundle->dev;
	fw_mgmt->timeout_jiffies = msecs_to_jiffies(FW_MGMT_TIMEOUT_MS);
	fw_mgmt->connection = connection;

	gb_connection_set_data(connection, fw_mgmt);
	init_completion(&fw_mgmt->completion);
	ida_init(&fw_mgmt->id_map);
	mutex_init(&fw_mgmt->mutex);
	kref_init(&fw_mgmt->kref);

	mutex_lock(&list_mutex);
	list_add(&fw_mgmt->node, &fw_mgmt_list);
	mutex_unlock(&list_mutex);

	ret = gb_connection_enable(connection);
	if (ret)
		goto err_list_del;

	minor = ida_simple_get(&fw_mgmt_minors_map, 0, NUM_MINORS, GFP_KERNEL);
	if (minor < 0) {
		ret = minor;
		goto err_connection_disable;
	}

	/* Add a char device to allow userspace to interact with fw-mgmt */
	fw_mgmt->dev_num = MKDEV(MAJOR(fw_mgmt_dev_num), minor);
	cdev_init(&fw_mgmt->cdev, &fw_mgmt_fops);

	ret = cdev_add(&fw_mgmt->cdev, fw_mgmt->dev_num, 1);
	if (ret)
		goto err_remove_ida;

	/* Add a soft link to the previously added char-dev within the bundle */
	fw_mgmt->class_device = device_create(fw_mgmt_class, fw_mgmt->parent,
					      fw_mgmt->dev_num, NULL,
					      "gb-fw-mgmt-%d", minor);
	if (IS_ERR(fw_mgmt->class_device)) {
		ret = PTR_ERR(fw_mgmt->class_device);
		goto err_del_cdev;
	}

	return 0;

err_del_cdev:
	cdev_del(&fw_mgmt->cdev);
err_remove_ida:
	ida_simple_remove(&fw_mgmt_minors_map, minor);
err_connection_disable:
	gb_connection_disable(connection);
err_list_del:
	mutex_lock(&list_mutex);
	list_del(&fw_mgmt->node);
	mutex_unlock(&list_mutex);

	put_fw_mgmt(fw_mgmt);

	return ret;
}

void gb_fw_mgmt_connection_exit(struct gb_connection *connection)
{
	struct fw_mgmt *fw_mgmt;

	if (!connection)
		return;

	fw_mgmt = gb_connection_get_data(connection);

	device_destroy(fw_mgmt_class, fw_mgmt->dev_num);
	cdev_del(&fw_mgmt->cdev);
	ida_simple_remove(&fw_mgmt_minors_map, MINOR(fw_mgmt->dev_num));

	/*
	 * Disallow any new ioctl operations on the char device and wait for
	 * existing ones to finish.
	 */
	mutex_lock(&fw_mgmt->mutex);
	fw_mgmt->disabled = true;
	mutex_unlock(&fw_mgmt->mutex);

	/* All pending greybus operations should have finished by now */
	gb_connection_disable(fw_mgmt->connection);

	/* Disallow new users to get access to the fw_mgmt structure */
	mutex_lock(&list_mutex);
	list_del(&fw_mgmt->node);
	mutex_unlock(&list_mutex);

	/*
	 * All current users of fw_mgmt have already taken a reference to it by
	 * now; drop our reference and the structure will be freed once the
	 * last user puts its reference.
	 */
	put_fw_mgmt(fw_mgmt);
}

int fw_mgmt_init(void)
{
	int ret;

	fw_mgmt_class = class_create(THIS_MODULE, "gb_fw_mgmt");
	if (IS_ERR(fw_mgmt_class))
		return PTR_ERR(fw_mgmt_class);

	ret = alloc_chrdev_region(&fw_mgmt_dev_num, 0, NUM_MINORS,
				  "gb_fw_mgmt");
	if (ret)
		goto err_remove_class;

	return 0;

err_remove_class:
	class_destroy(fw_mgmt_class);
	return ret;
}

void fw_mgmt_exit(void)
{
	unregister_chrdev_region(fw_mgmt_dev_num, NUM_MINORS);
	class_destroy(fw_mgmt_class);
	ida_destroy(&fw_mgmt_minors_map);
}
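
/*
 * For illustration only: a minimal userspace sketch (not part of this driver)
 * showing how the interface firmware load-and-validate ioctl could be driven
 * from a program. The device path and firmware tag below are assumptions, and
 * error handling is reduced to the bare minimum; the ioctl names, struct
 * fields and GB_FIRMWARE_TAG_MAX_SIZE come from greybus_firmware.h as used
 * above.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	#include "greybus_firmware.h"
 *
 *	int main(void)
 *	{
 *		struct fw_mgmt_ioc_intf_load_and_validate load = { };
 *		int fd = open("/dev/gb-fw-mgmt-0", O_RDWR);	// hypothetical minor
 *
 *		if (fd < 0)
 *			return 1;
 *
 *		load.load_method = GB_FW_LOAD_METHOD_UNIPRO;
 *		strncpy((char *)load.firmware_tag, "s3fw",	// hypothetical tag
 *			GB_FIRMWARE_TAG_MAX_SIZE - 1);
 *
 *		// Blocks until the loaded-fw response arrives or the timeout expires
 *		if (ioctl(fd, FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE, &load) == 0)
 *			printf("status %u, version %u.%u\n", load.status,
 *			       load.major, load.minor);
 *
 *		close(fd);
 *		return 0;
 *	}
 */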