  1. /*
  2. * Greybus Component Authentication Protocol (CAP) Driver.
  3. *
  4. * Copyright 2016 Google Inc.
  5. * Copyright 2016 Linaro Ltd.
  6. *
  7. * Released under the GPLv2 only.
  8. */
  9. #include "greybus.h"
  10. #include <linux/cdev.h>
  11. #include <linux/fs.h>
  12. #include <linux/ioctl.h>
  13. #include <linux/uaccess.h>
  14. #include "greybus_authentication.h"
  15. #include "firmware.h"
  16. #include "greybus.h"
  17. #define CAP_TIMEOUT_MS 1000
  18. /*
  19. * Number of minor devices this driver supports.
  20. * There will be exactly one required per Interface.
  21. */
  22. #define NUM_MINORS U8_MAX
  23. struct gb_cap {
  24. struct device *parent;
  25. struct gb_connection *connection;
  26. struct kref kref;
  27. struct list_head node;
  28. bool disabled; /* connection getting disabled */
  29. struct mutex mutex;
  30. struct cdev cdev;
  31. struct device *class_device;
  32. dev_t dev_num;
  33. };
  34. static struct class *cap_class;
  35. static dev_t cap_dev_num;
  36. static DEFINE_IDA(cap_minors_map);
  37. static LIST_HEAD(cap_list);
  38. static DEFINE_MUTEX(list_mutex);
  39. static void cap_kref_release(struct kref *kref)
  40. {
  41. struct gb_cap *cap = container_of(kref, struct gb_cap, kref);
  42. kfree(cap);
  43. }
  44. /*
  45. * All users of cap take a reference (from within list_mutex lock), before
  46. * they get a pointer to play with. And the structure will be freed only after
  47. * the last user has put the reference to it.
  48. */
  49. static void put_cap(struct gb_cap *cap)
  50. {
  51. kref_put(&cap->kref, cap_kref_release);
  52. }
  53. /* Caller must call put_cap() after using struct gb_cap */
  54. static struct gb_cap *get_cap(struct cdev *cdev)
  55. {
  56. struct gb_cap *cap;
  57. mutex_lock(&list_mutex);
  58. list_for_each_entry(cap, &cap_list, node) {
  59. if (&cap->cdev == cdev) {
  60. kref_get(&cap->kref);
  61. goto unlock;
  62. }
  63. }
  64. cap = NULL;
  65. unlock:
  66. mutex_unlock(&list_mutex);
  67. return cap;
  68. }
  69. static int cap_get_endpoint_uid(struct gb_cap *cap, u8 *euid)
  70. {
  71. struct gb_connection *connection = cap->connection;
  72. struct gb_cap_get_endpoint_uid_response response;
  73. int ret;
  74. ret = gb_operation_sync(connection, GB_CAP_TYPE_GET_ENDPOINT_UID, NULL,
  75. 0, &response, sizeof(response));
  76. if (ret) {
  77. dev_err(cap->parent, "failed to get endpoint uid (%d)\n", ret);
  78. return ret;
  79. }
  80. memcpy(euid, response.uid, sizeof(response.uid));
  81. return 0;
  82. }
  83. static int cap_get_ims_certificate(struct gb_cap *cap, u32 class, u32 id,
  84. u8 *certificate, u32 *size, u8 *result)
  85. {
  86. struct gb_connection *connection = cap->connection;
  87. struct gb_cap_get_ims_certificate_request *request;
  88. struct gb_cap_get_ims_certificate_response *response;
  89. size_t max_size = gb_operation_get_payload_size_max(connection);
  90. struct gb_operation *op;
  91. int ret;
  92. op = gb_operation_create_flags(connection,
  93. GB_CAP_TYPE_GET_IMS_CERTIFICATE,
  94. sizeof(*request), max_size,
  95. GB_OPERATION_FLAG_SHORT_RESPONSE,
  96. GFP_KERNEL);
  97. if (!op)
  98. return -ENOMEM;
  99. request = op->request->payload;
  100. request->certificate_class = cpu_to_le32(class);
  101. request->certificate_id = cpu_to_le32(id);
  102. ret = gb_operation_request_send_sync(op);
  103. if (ret) {
  104. dev_err(cap->parent, "failed to get certificate (%d)\n", ret);
  105. goto done;
  106. }
  107. response = op->response->payload;
  108. *result = response->result_code;
  109. *size = op->response->payload_size - sizeof(*response);
  110. memcpy(certificate, response->certificate, *size);
  111. done:
  112. gb_operation_put(op);
  113. return ret;
  114. }
  115. static int cap_authenticate(struct gb_cap *cap, u32 auth_type, u8 *uid,
  116. u8 *challenge, u8 *result, u8 *auth_response,
  117. u32 *signature_size, u8 *signature)
  118. {
  119. struct gb_connection *connection = cap->connection;
  120. struct gb_cap_authenticate_request *request;
  121. struct gb_cap_authenticate_response *response;
  122. size_t max_size = gb_operation_get_payload_size_max(connection);
  123. struct gb_operation *op;
  124. int ret;
  125. op = gb_operation_create_flags(connection, GB_CAP_TYPE_AUTHENTICATE,
  126. sizeof(*request), max_size,
  127. GB_OPERATION_FLAG_SHORT_RESPONSE,
  128. GFP_KERNEL);
  129. if (!op)
  130. return -ENOMEM;
  131. request = op->request->payload;
  132. request->auth_type = cpu_to_le32(auth_type);
  133. memcpy(request->uid, uid, sizeof(request->uid));
  134. memcpy(request->challenge, challenge, sizeof(request->challenge));
  135. ret = gb_operation_request_send_sync(op);
  136. if (ret) {
  137. dev_err(cap->parent, "failed to authenticate (%d)\n", ret);
  138. goto done;
  139. }
  140. response = op->response->payload;
  141. *result = response->result_code;
  142. *signature_size = op->response->payload_size - sizeof(*response);
  143. memcpy(auth_response, response->response, sizeof(response->response));
  144. memcpy(signature, response->signature, *signature_size);
  145. done:
  146. gb_operation_put(op);
  147. return ret;
  148. }
  149. /* Char device fops */
  150. static int cap_open(struct inode *inode, struct file *file)
  151. {
  152. struct gb_cap *cap = get_cap(inode->i_cdev);
  153. /* cap structure can't get freed until file descriptor is closed */
  154. if (cap) {
  155. file->private_data = cap;
  156. return 0;
  157. }
  158. return -ENODEV;
  159. }
  160. static int cap_release(struct inode *inode, struct file *file)
  161. {
  162. struct gb_cap *cap = file->private_data;
  163. put_cap(cap);
  164. return 0;
  165. }
  166. static int cap_ioctl(struct gb_cap *cap, unsigned int cmd,
  167. void __user *buf)
  168. {
  169. struct cap_ioc_get_endpoint_uid endpoint_uid;
  170. struct cap_ioc_get_ims_certificate *ims_cert;
  171. struct cap_ioc_authenticate *authenticate;
  172. size_t size;
  173. int ret;
  174. switch (cmd) {
  175. case CAP_IOC_GET_ENDPOINT_UID:
  176. ret = cap_get_endpoint_uid(cap, endpoint_uid.uid);
  177. if (ret)
  178. return ret;
  179. if (copy_to_user(buf, &endpoint_uid, sizeof(endpoint_uid)))
  180. return -EFAULT;
  181. return 0;
  182. case CAP_IOC_GET_IMS_CERTIFICATE:
  183. size = sizeof(*ims_cert);
  184. ims_cert = memdup_user(buf, size);
  185. if (IS_ERR(ims_cert))
  186. return PTR_ERR(ims_cert);
  187. ret = cap_get_ims_certificate(cap, ims_cert->certificate_class,
  188. ims_cert->certificate_id,
  189. ims_cert->certificate,
  190. &ims_cert->cert_size,
  191. &ims_cert->result_code);
  192. if (!ret && copy_to_user(buf, ims_cert, size))
  193. ret = -EFAULT;
  194. kfree(ims_cert);
  195. return ret;
  196. case CAP_IOC_AUTHENTICATE:
  197. size = sizeof(*authenticate);
  198. authenticate = memdup_user(buf, size);
  199. if (IS_ERR(authenticate))
  200. return PTR_ERR(authenticate);
  201. ret = cap_authenticate(cap, authenticate->auth_type,
  202. authenticate->uid,
  203. authenticate->challenge,
  204. &authenticate->result_code,
  205. authenticate->response,
  206. &authenticate->signature_size,
  207. authenticate->signature);
  208. if (!ret && copy_to_user(buf, authenticate, size))
  209. ret = -EFAULT;
  210. kfree(authenticate);
  211. return ret;
  212. default:
  213. return -ENOTTY;
  214. }
  215. }
  216. static long cap_ioctl_unlocked(struct file *file, unsigned int cmd,
  217. unsigned long arg)
  218. {
  219. struct gb_cap *cap = file->private_data;
  220. struct gb_bundle *bundle = cap->connection->bundle;
  221. int ret = -ENODEV;
  222. /*
  223. * Serialize ioctls.
  224. *
  225. * We don't want the user to do multiple authentication operations in
  226. * parallel.
  227. *
  228. * This is also used to protect ->disabled, which is used to check if
  229. * the connection is getting disconnected, so that we don't start any
  230. * new operations.
  231. */
  232. mutex_lock(&cap->mutex);
  233. if (!cap->disabled) {
  234. ret = gb_pm_runtime_get_sync(bundle);
  235. if (!ret) {
  236. ret = cap_ioctl(cap, cmd, (void __user *)arg);
  237. gb_pm_runtime_put_autosuspend(bundle);
  238. }
  239. }
  240. mutex_unlock(&cap->mutex);
  241. return ret;
  242. }
  243. static const struct file_operations cap_fops = {
  244. .owner = THIS_MODULE,
  245. .open = cap_open,
  246. .release = cap_release,
  247. .unlocked_ioctl = cap_ioctl_unlocked,
  248. };
  249. int gb_cap_connection_init(struct gb_connection *connection)
  250. {
  251. struct gb_cap *cap;
  252. int ret, minor;
  253. if (!connection)
  254. return 0;
  255. cap = kzalloc(sizeof(*cap), GFP_KERNEL);
  256. if (!cap)
  257. return -ENOMEM;
  258. cap->parent = &connection->bundle->dev;
  259. cap->connection = connection;
  260. mutex_init(&cap->mutex);
  261. gb_connection_set_data(connection, cap);
  262. kref_init(&cap->kref);
  263. mutex_lock(&list_mutex);
  264. list_add(&cap->node, &cap_list);
  265. mutex_unlock(&list_mutex);
  266. ret = gb_connection_enable(connection);
  267. if (ret)
  268. goto err_list_del;
  269. minor = ida_simple_get(&cap_minors_map, 0, NUM_MINORS, GFP_KERNEL);
  270. if (minor < 0) {
  271. ret = minor;
  272. goto err_connection_disable;
  273. }
  274. /* Add a char device to allow userspace to interact with cap */
  275. cap->dev_num = MKDEV(MAJOR(cap_dev_num), minor);
  276. cdev_init(&cap->cdev, &cap_fops);
  277. ret = cdev_add(&cap->cdev, cap->dev_num, 1);
  278. if (ret)
  279. goto err_remove_ida;
  280. /* Add a soft link to the previously added char-dev within the bundle */
  281. cap->class_device = device_create(cap_class, cap->parent, cap->dev_num,
  282. NULL, "gb-authenticate-%d", minor);
  283. if (IS_ERR(cap->class_device)) {
  284. ret = PTR_ERR(cap->class_device);
  285. goto err_del_cdev;
  286. }
  287. return 0;
  288. err_del_cdev:
  289. cdev_del(&cap->cdev);
  290. err_remove_ida:
  291. ida_simple_remove(&cap_minors_map, minor);
  292. err_connection_disable:
  293. gb_connection_disable(connection);
  294. err_list_del:
  295. mutex_lock(&list_mutex);
  296. list_del(&cap->node);
  297. mutex_unlock(&list_mutex);
  298. put_cap(cap);
  299. return ret;
  300. }
  301. void gb_cap_connection_exit(struct gb_connection *connection)
  302. {
  303. struct gb_cap *cap;
  304. if (!connection)
  305. return;
  306. cap = gb_connection_get_data(connection);
  307. device_destroy(cap_class, cap->dev_num);
  308. cdev_del(&cap->cdev);
  309. ida_simple_remove(&cap_minors_map, MINOR(cap->dev_num));
  310. /*
  311. * Disallow any new ioctl operations on the char device and wait for
  312. * existing ones to finish.
  313. */
  314. mutex_lock(&cap->mutex);
  315. cap->disabled = true;
  316. mutex_unlock(&cap->mutex);
  317. /* All pending greybus operations should have finished by now */
  318. gb_connection_disable(cap->connection);
  319. /* Disallow new users to get access to the cap structure */
  320. mutex_lock(&list_mutex);
  321. list_del(&cap->node);
  322. mutex_unlock(&list_mutex);
  323. /*
  324. * All current users of cap would have taken a reference to it by
  325. * now, we can drop our reference and wait the last user will get
  326. * cap freed.
  327. */
  328. put_cap(cap);
  329. }
  330. int cap_init(void)
  331. {
  332. int ret;
  333. cap_class = class_create(THIS_MODULE, "gb_authenticate");
  334. if (IS_ERR(cap_class))
  335. return PTR_ERR(cap_class);
  336. ret = alloc_chrdev_region(&cap_dev_num, 0, NUM_MINORS,
  337. "gb_authenticate");
  338. if (ret)
  339. goto err_remove_class;
  340. return 0;
  341. err_remove_class:
  342. class_destroy(cap_class);
  343. return ret;
  344. }
  345. void cap_exit(void)
  346. {
  347. unregister_chrdev_region(cap_dev_num, NUM_MINORS);
  348. class_destroy(cap_class);
  349. ida_destroy(&cap_minors_map);
  350. }