flash.c

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/rtas.h>

#include "cxl.h"
#include "hcalls.h"

#define DOWNLOAD_IMAGE 1
#define VALIDATE_IMAGE 2
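
/*
 * 128-byte adapter image header, prepended to the first data chunk when
 * userspace sets CXL_AI_NEED_HEADER (its size is checked against
 * CXL_AI_HEADER_SIZE in device_open()).
 */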
struct ai_header {
        u16 version;
        u8  reserved0[6];
        u16 vendor;
        u16 device;
        u16 subsystem_vendor;
        u16 subsystem;
        u64 image_offset;
        u64 image_length;
        u8  reserved1[96];
};

static struct semaphore sem;
static unsigned long *buffer[CXL_AI_MAX_ENTRIES];
static struct sg_list *le;
static u64 continue_token;
static unsigned int transfer;

struct update_props_workarea {
        __be32 phandle;
        __be32 state;
        __be64 reserved;
        __be32 nprops;
} __packed;

struct update_nodes_workarea {
        __be32 state;
        __be64 unit_address;
        __be32 reserved;
} __packed;

#define DEVICE_SCOPE 3

#define NODE_ACTION_MASK 0xff000000
#define NODE_COUNT_MASK  0x00ffffff

#define OPCODE_DELETE 0x01000000
#define OPCODE_UPDATE 0x02000000
#define OPCODE_ADD    0x03000000
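
/*
 * Thin wrapper around rtas_call(): serialise access to the shared
 * rtas_data_buf and copy the caller's work area in and out around the call.
 */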
static int rcall(int token, char *buf, s32 scope)
{
        int rc;

        spin_lock(&rtas_data_buf_lock);

        memcpy(rtas_data_buf, buf, RTAS_DATA_BUF_SIZE);
        rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, scope);
        memcpy(buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);

        spin_unlock(&rtas_data_buf_lock);
        return rc;
}
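
/*
 * Build a new property (name, length, value) and hand it to
 * cxl_update_properties(); all allocations are freed if the update fails.
 */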
static int update_property(struct device_node *dn, const char *name,
                           u32 vd, char *value)
{
        struct property *new_prop;
        u32 *val;
        int rc;

        new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
        if (!new_prop)
                return -ENOMEM;

        new_prop->name = kstrdup(name, GFP_KERNEL);
        if (!new_prop->name) {
                kfree(new_prop);
                return -ENOMEM;
        }

        new_prop->length = vd;
        new_prop->value = kzalloc(new_prop->length, GFP_KERNEL);
        if (!new_prop->value) {
                kfree(new_prop->name);
                kfree(new_prop);
                return -ENOMEM;
        }
        memcpy(new_prop->value, value, vd);

        val = (u32 *)new_prop->value;
        rc = cxl_update_properties(dn, new_prop);
        pr_devel("%s: update property (%s, length: %i, value: %#x)\n",
                 dn->name, name, vd, be32_to_cpu(*val));

        if (rc) {
                kfree(new_prop->name);
                kfree(new_prop->value);
                kfree(new_prop);
        }
        return rc;
}
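
/*
 * Refresh the properties of one device node: call ibm,update-properties
 * until it stops returning 1 (more data) and parse the work area, where
 * each entry is a NUL-terminated property name, a 4-byte value descriptor
 * and the value bytes (descriptors 0 and 0x80000000 carry no value data).
 */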
static int update_node(__be32 phandle, s32 scope)
{
        struct update_props_workarea *upwa;
        struct device_node *dn;
        int i, rc, ret;
        char *prop_data;
        char *buf;
        int token;
        u32 nprops;
        u32 vd;

        token = rtas_token("ibm,update-properties");
        if (token == RTAS_UNKNOWN_SERVICE)
                return -EINVAL;

        buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        dn = of_find_node_by_phandle(be32_to_cpu(phandle));
        if (!dn) {
                kfree(buf);
                return -ENOENT;
        }

        upwa = (struct update_props_workarea *)&buf[0];
        upwa->phandle = phandle;
        do {
                rc = rcall(token, buf, scope);
                if (rc < 0)
                        break;

                prop_data = buf + sizeof(*upwa);
                nprops = be32_to_cpu(upwa->nprops);

                if (*prop_data == 0) {
                        prop_data++;
                        vd = be32_to_cpu(*(__be32 *)prop_data);
                        prop_data += vd + sizeof(vd);
                        nprops--;
                }

                for (i = 0; i < nprops; i++) {
                        char *prop_name;

                        prop_name = prop_data;
                        prop_data += strlen(prop_name) + 1;
                        vd = be32_to_cpu(*(__be32 *)prop_data);
                        prop_data += sizeof(vd);

                        if ((vd != 0x00000000) && (vd != 0x80000000)) {
                                ret = update_property(dn, prop_name, vd,
                                                      prop_data);
                                if (ret)
                                        pr_err("cxl: Could not update property %s - %i\n",
                                               prop_name, ret);

                                prop_data += vd;
                        }
                }
        } while (rc == 1);

        of_node_put(dn);
        kfree(buf);
        return rc;
}
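
/*
 * Walk the node list returned by ibm,update-nodes for this adapter's unit
 * address and apply UPDATE actions via update_node(); DELETE entries are
 * ignored and ADD entries are only skipped over (their drc_index is
 * consumed).
 */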
static int update_devicetree(struct cxl *adapter, s32 scope)
{
        struct update_nodes_workarea *unwa;
        u32 action, node_count;
        int token, rc, i;
        __be32 *data, drc_index, phandle;
        char *buf;

        token = rtas_token("ibm,update-nodes");
        if (token == RTAS_UNKNOWN_SERVICE)
                return -EINVAL;

        buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        unwa = (struct update_nodes_workarea *)&buf[0];
        unwa->unit_address = cpu_to_be64(adapter->guest->handle);
        do {
                rc = rcall(token, buf, scope);
                if (rc && rc != 1)
                        break;

                data = (__be32 *)buf + 4;
                while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
                        action = be32_to_cpu(*data) & NODE_ACTION_MASK;
                        node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;
                        pr_devel("device reconfiguration - action: %#x, nodes: %#x\n",
                                 action, node_count);
                        data++;

                        for (i = 0; i < node_count; i++) {
                                phandle = *data++;

                                switch (action) {
                                case OPCODE_DELETE:
                                        /* nothing to do */
                                        break;
                                case OPCODE_UPDATE:
                                        update_node(phandle, scope);
                                        break;
                                case OPCODE_ADD:
                                        /* nothing to do, just move pointer */
                                        drc_index = *data++;
                                        break;
                                }
                        }
                }
        } while (rc == 1);

        kfree(buf);
        return 0;
}
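
/*
 * Split the (optionally header-prefixed) user image into
 * CXL_AI_BUFFER_SIZE chunks, copy each chunk into a pre-allocated page,
 * describe the chunks in the scatter/gather list 'le' and pass the list
 * to the download/validate hcall. 'continue_token' carries the hypervisor
 * state across successive calls.
 */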
static int handle_image(struct cxl *adapter, int operation,
                        long (*fct)(u64, u64, u64, u64 *),
                        struct cxl_adapter_image *ai)
{
        size_t mod, s_copy, len_chunk = 0;
        struct ai_header *header = NULL;
        unsigned int entries = 0, i;
        void *dest, *from;
        int rc = 0, need_header;

        /* base adapter image header */
        need_header = (ai->flags & CXL_AI_NEED_HEADER);
        if (need_header) {
                header = kzalloc(sizeof(struct ai_header), GFP_KERNEL);
                if (!header)
                        return -ENOMEM;
                header->version = cpu_to_be16(1);
                header->vendor = cpu_to_be16(adapter->guest->vendor);
                header->device = cpu_to_be16(adapter->guest->device);
                header->subsystem_vendor = cpu_to_be16(adapter->guest->subsystem_vendor);
                header->subsystem = cpu_to_be16(adapter->guest->subsystem);
                header->image_offset = cpu_to_be64(CXL_AI_HEADER_SIZE);
                header->image_length = cpu_to_be64(ai->len_image);
        }

        /* number of entries in the list */
        len_chunk = ai->len_data;
        if (need_header)
                len_chunk += CXL_AI_HEADER_SIZE;
        entries = len_chunk / CXL_AI_BUFFER_SIZE;
        mod = len_chunk % CXL_AI_BUFFER_SIZE;
        if (mod)
                entries++;

        if (entries > CXL_AI_MAX_ENTRIES) {
                rc = -EINVAL;
                goto err;
        }

        /*          <-- MAX_CHUNK_SIZE = 4096 * 256 = 1048576 bytes -->
         * chunk 0  ----------------------------------------------------
         *          | header  |  data                                  |
         *          ----------------------------------------------------
         * chunk 1  ----------------------------------------------------
         *          | data                                             |
         *          ----------------------------------------------------
         * ....
         * chunk n  ----------------------------------------------------
         *          | data                                             |
         *          ----------------------------------------------------
         */
        from = (void *) ai->data;
        for (i = 0; i < entries; i++) {
                dest = buffer[i];
                s_copy = CXL_AI_BUFFER_SIZE;

                if ((need_header) && (i == 0)) {
                        /* add adapter image header */
                        memcpy(buffer[i], header, sizeof(struct ai_header));
                        s_copy = CXL_AI_BUFFER_SIZE - CXL_AI_HEADER_SIZE;
                        dest += CXL_AI_HEADER_SIZE; /* image offset */
                }
                if ((i == (entries - 1)) && mod)
                        s_copy = mod;

                /* copy data */
                if (copy_from_user(dest, from, s_copy)) {
                        rc = -EFAULT;
                        goto err;
                }

                /* fill in the list */
                le[i].phys_addr = cpu_to_be64(virt_to_phys(buffer[i]));
                le[i].len = cpu_to_be64(CXL_AI_BUFFER_SIZE);
                if ((i == (entries - 1)) && mod)
                        le[i].len = cpu_to_be64(mod);
                from += s_copy;
        }
        pr_devel("%s (op: %i, need header: %i, entries: %i, token: %#llx)\n",
                 __func__, operation, need_header, entries, continue_token);

        /*
         * download/validate the adapter image to the coherent
         * platform facility
         */
        rc = fct(adapter->guest->handle, virt_to_phys(le), entries,
                 &continue_token);
        if (rc == 0) /* success of download/validation operation */
                continue_token = 0;

err:
        kfree(header);
        return rc;
}
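
/*
 * Dispatch a download or validate request. A failed operation resets the
 * adapter; a successful validation removes the AFUs, resets the adapter
 * and refreshes the device tree, and 'transfer' is set so that the driver
 * module is reloaded when the device is closed.
 */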
static int transfer_image(struct cxl *adapter, int operation,
                          struct cxl_adapter_image *ai)
{
        int rc = 0;
        int afu;

        switch (operation) {
        case DOWNLOAD_IMAGE:
                rc = handle_image(adapter, operation,
                                  &cxl_h_download_adapter_image, ai);
                if (rc < 0) {
                        pr_devel("resetting adapter\n");
                        cxl_h_reset_adapter(adapter->guest->handle);
                }
                return rc;

        case VALIDATE_IMAGE:
                rc = handle_image(adapter, operation,
                                  &cxl_h_validate_adapter_image, ai);
                if (rc < 0) {
                        pr_devel("resetting adapter\n");
                        cxl_h_reset_adapter(adapter->guest->handle);
                        return rc;
                }
                if (rc == 0) {
                        pr_devel("remove current afu\n");
                        for (afu = 0; afu < adapter->slices; afu++)
                                cxl_guest_remove_afu(adapter->afu[afu]);

                        pr_devel("resetting adapter\n");
                        cxl_h_reset_adapter(adapter->guest->handle);

                        /*
                         * The entire image has now been downloaded and the
                         * validation has been successfully performed.
                         * After that, the partition should call
                         * ibm,update-nodes and ibm,update-properties to
                         * receive the current configuration.
                         */
                        rc = update_devicetree(adapter, DEVICE_SCOPE);
                        transfer = 1;
                }
                return rc;
        }

        return -EINVAL;
}
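
/*
 * Copy the image descriptor from userspace and reject any request with
 * reserved fields set or flags outside CXL_AI_ALL before handing it to
 * transfer_image().
 */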
static long ioctl_transfer_image(struct cxl *adapter, int operation,
                                 struct cxl_adapter_image __user *uai)
{
        struct cxl_adapter_image ai;

        pr_devel("%s\n", __func__);

        if (copy_from_user(&ai, uai, sizeof(struct cxl_adapter_image)))
                return -EFAULT;

        /*
         * Make sure reserved fields and bits are set to 0
         */
        if (ai.reserved1 || ai.reserved2 || ai.reserved3 || ai.reserved4 ||
            (ai.flags & ~CXL_AI_ALL))
                return -EINVAL;

        return transfer_image(adapter, operation, &ai);
}
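
/*
 * Only one process may drive a flash operation at a time: the semaphore
 * taken here is released in device_close(). Allocate one page for the
 * scatter/gather list and one page per data chunk.
 */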
static int device_open(struct inode *inode, struct file *file)
{
        int adapter_num = CXL_DEVT_ADAPTER(inode->i_rdev);
        struct cxl *adapter;
        int rc = 0, i;

        pr_devel("in %s\n", __func__);

        BUG_ON(sizeof(struct ai_header) != CXL_AI_HEADER_SIZE);

        /* Allows one process to open the device by using a semaphore */
        if (down_interruptible(&sem) != 0)
                return -EPERM;

        if (!(adapter = get_cxl_adapter(adapter_num))) {
                rc = -ENODEV;
                goto err_unlock;
        }

        file->private_data = adapter;
        continue_token = 0;
        transfer = 0;

        for (i = 0; i < CXL_AI_MAX_ENTRIES; i++)
                buffer[i] = NULL;

        /* aligned buffer containing list entries which describes up to
         * 1 megabyte of data (256 entries of 4096 bytes each)
         * Logical real address of buffer 0  -  Buffer 0 length in bytes
         * Logical real address of buffer 1  -  Buffer 1 length in bytes
         * Logical real address of buffer 2  -  Buffer 2 length in bytes
         * ....
         * ....
         * Logical real address of buffer N  -  Buffer N length in bytes
         */
        le = (struct sg_list *)get_zeroed_page(GFP_KERNEL);
        if (!le) {
                rc = -ENOMEM;
                goto err;
        }

        for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) {
                buffer[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL);
                if (!buffer[i]) {
                        rc = -ENOMEM;
                        goto err1;
                }
        }

        return 0;

err1:
        for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) {
                if (buffer[i])
                        free_page((unsigned long) buffer[i]);
        }

        if (le)
                free_page((unsigned long) le);

err:
        put_device(&adapter->dev);

err_unlock:
        up(&sem);

        return rc;
}

static long device_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct cxl *adapter = file->private_data;

        pr_devel("in %s\n", __func__);

        if (cmd == CXL_IOCTL_DOWNLOAD_IMAGE)
                return ioctl_transfer_image(adapter,
                                            DOWNLOAD_IMAGE,
                                            (struct cxl_adapter_image __user *)arg);
        else if (cmd == CXL_IOCTL_VALIDATE_IMAGE)
                return ioctl_transfer_image(adapter,
                                            VALIDATE_IMAGE,
                                            (struct cxl_adapter_image __user *)arg);
        else
                return -EINVAL;
}

static long device_compat_ioctl(struct file *file, unsigned int cmd,
                                unsigned long arg)
{
        return device_ioctl(file, cmd, arg);
}
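
/*
 * Free the per-chunk buffers and the scatter/gather list, release the
 * semaphore and drop the adapter reference. If an image transfer
 * completed, reload the driver module; otherwise reset the adapter.
 */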
static int device_close(struct inode *inode, struct file *file)
{
        struct cxl *adapter = file->private_data;
        int i;

        pr_devel("in %s\n", __func__);

        for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) {
                if (buffer[i])
                        free_page((unsigned long) buffer[i]);
        }

        if (le)
                free_page((unsigned long) le);

        up(&sem);
        put_device(&adapter->dev);
        continue_token = 0;

        /* reload the module */
        if (transfer)
                cxl_guest_reload_module(adapter);
        else {
                pr_devel("resetting adapter\n");
                cxl_h_reset_adapter(adapter->guest->handle);
        }

        transfer = 0;
        return 0;
}

static const struct file_operations fops = {
        .owner          = THIS_MODULE,
        .open           = device_open,
        .unlocked_ioctl = device_ioctl,
        .compat_ioctl   = device_compat_ioctl,
        .release        = device_close,
};

void cxl_guest_remove_chardev(struct cxl *adapter)
{
        cdev_del(&adapter->guest->cdev);
}
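
/*
 * Register the image-download character device for this adapter and
 * initialise the semaphore that serialises opens.
 */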
int cxl_guest_add_chardev(struct cxl *adapter)
{
        dev_t devt;
        int rc;

        devt = MKDEV(MAJOR(cxl_get_dev()), CXL_CARD_MINOR(adapter));
        cdev_init(&adapter->guest->cdev, &fops);

        if ((rc = cdev_add(&adapter->guest->cdev, devt, 1))) {
                dev_err(&adapter->dev,
                        "Unable to add chardev on adapter (card%i): %i\n",
                        adapter->adapter_num, rc);
                goto err;
        }

        adapter->dev.devt = devt;
        sema_init(&sem, 1);

err:
        return rc;
}