kexec_file.c

/*
 * kexec: kexec_file_load system call
 *
 * Copyright (C) 2014 Red Hat Inc.
 * Authors:
 *      Vivek Goyal <vgoyal@redhat.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/ima.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/syscalls.h>
#include <linux/vmalloc.h>
#include "kexec_internal.h"

static int kexec_calculate_store_digests(struct kimage *image);

/* Architectures can provide this probe function */
int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
					 unsigned long buf_len)
{
	return -ENOEXEC;
}
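
/*
 * Illustrative sketch (not part of this file): an architecture typically
 * overrides the weak probe above by walking its own table of
 * struct kexec_file_ops and remembering the first loader whose ->probe()
 * accepts the supplied kernel buffer. Roughly, an x86-style implementation
 * of that era looked like the following (names hedged, details vary):
 *
 *	static struct kexec_file_ops *kexec_file_loaders[] = {
 *		&kexec_bzImage64_ops,
 *	};
 *
 *	int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
 *					  unsigned long buf_len)
 *	{
 *		int i, ret = -ENOEXEC;
 *
 *		for (i = 0; i < ARRAY_SIZE(kexec_file_loaders); i++) {
 *			if (!kexec_file_loaders[i]->probe)
 *				continue;
 *			ret = kexec_file_loaders[i]->probe(buf, buf_len);
 *			if (!ret) {
 *				image->fops = kexec_file_loaders[i];
 *				return 0;
 *			}
 *		}
 *		return ret;
 *	}
 */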

void * __weak arch_kexec_kernel_image_load(struct kimage *image)
{
	return ERR_PTR(-ENOEXEC);
}

int __weak arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	return -EINVAL;
}

#ifdef CONFIG_KEXEC_VERIFY_SIG
int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
					unsigned long buf_len)
{
	return -EKEYREJECTED;
}
#endif

/* Apply relocations of type RELA */
int __weak
arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
				 unsigned int relsec)
{
	pr_err("RELA relocation unsupported.\n");
	return -ENOEXEC;
}

/* Apply relocations of type REL */
int __weak
arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			     unsigned int relsec)
{
	pr_err("REL relocation unsupported.\n");
	return -ENOEXEC;
}

/*
 * Free up memory used by kernel, initrd, and command line. This is temporary
 * memory allocation which is not needed any more after these buffers have
 * been loaded into separate segments and have been copied elsewhere.
 */
void kimage_file_post_load_cleanup(struct kimage *image)
{
	struct purgatory_info *pi = &image->purgatory_info;

	vfree(image->kernel_buf);
	image->kernel_buf = NULL;

	vfree(image->initrd_buf);
	image->initrd_buf = NULL;

	kfree(image->cmdline_buf);
	image->cmdline_buf = NULL;

	vfree(pi->purgatory_buf);
	pi->purgatory_buf = NULL;

	vfree(pi->sechdrs);
	pi->sechdrs = NULL;

#ifdef CONFIG_IMA_KEXEC
	vfree(image->ima_buffer);
	image->ima_buffer = NULL;
#endif /* CONFIG_IMA_KEXEC */

	/* See if architecture has anything to cleanup post load */
	arch_kimage_file_post_load_cleanup(image);

	/*
	 * Above call should have called into the image loader to free up
	 * any data stored in kimage->image_loader_data. It should
	 * be ok now to free it up.
	 */
	kfree(image->image_loader_data);
	image->image_loader_data = NULL;
}

/*
 * In file mode, the list of segments is prepared by the kernel. Copy the
 * relevant data from user space, do error checking, and prepare the
 * segment list.
 */
static int
kimage_file_prepare_segments(struct kimage *image, int kernel_fd, int initrd_fd,
			     const char __user *cmdline_ptr,
			     unsigned long cmdline_len, unsigned flags)
{
	int ret = 0;
	void *ldata;
	loff_t size;

	ret = kernel_read_file_from_fd(kernel_fd, &image->kernel_buf,
				       &size, INT_MAX, READING_KEXEC_IMAGE);
	if (ret)
		return ret;
	image->kernel_buf_len = size;

	/* IMA needs to pass the measurement list to the next kernel. */
	ima_add_kexec_buffer(image);

	/* Call arch image probe handlers */
	ret = arch_kexec_kernel_image_probe(image, image->kernel_buf,
					    image->kernel_buf_len);
	if (ret)
		goto out;

#ifdef CONFIG_KEXEC_VERIFY_SIG
	ret = arch_kexec_kernel_verify_sig(image, image->kernel_buf,
					   image->kernel_buf_len);
	if (ret) {
		pr_debug("kernel signature verification failed.\n");
		goto out;
	}
	pr_debug("kernel signature verification successful.\n");
#endif
	/* It is possible that no initramfs is being loaded */
	if (!(flags & KEXEC_FILE_NO_INITRAMFS)) {
		ret = kernel_read_file_from_fd(initrd_fd, &image->initrd_buf,
					       &size, INT_MAX,
					       READING_KEXEC_INITRAMFS);
		if (ret)
			goto out;
		image->initrd_buf_len = size;
	}

	if (cmdline_len) {
		image->cmdline_buf = memdup_user(cmdline_ptr, cmdline_len);
		if (IS_ERR(image->cmdline_buf)) {
			ret = PTR_ERR(image->cmdline_buf);
			image->cmdline_buf = NULL;
			goto out;
		}

		image->cmdline_buf_len = cmdline_len;

		/* command line should be a string with last byte null */
		if (image->cmdline_buf[cmdline_len - 1] != '\0') {
			ret = -EINVAL;
			goto out;
		}
	}

	/* Call arch image load handlers */
	ldata = arch_kexec_kernel_image_load(image);

	if (IS_ERR(ldata)) {
		ret = PTR_ERR(ldata);
		goto out;
	}

	image->image_loader_data = ldata;
out:
	/* In case of error, free up all allocated memory in this function */
	if (ret)
		kimage_file_post_load_cleanup(image);
	return ret;
}

static int
kimage_file_alloc_init(struct kimage **rimage, int kernel_fd,
		       int initrd_fd, const char __user *cmdline_ptr,
		       unsigned long cmdline_len, unsigned long flags)
{
	int ret;
	struct kimage *image;
	bool kexec_on_panic = flags & KEXEC_FILE_ON_CRASH;

	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->file_mode = 1;

	if (kexec_on_panic) {
		/* Enable special crash kernel control page alloc policy. */
		image->control_page = crashk_res.start;
		image->type = KEXEC_TYPE_CRASH;
	}

	ret = kimage_file_prepare_segments(image, kernel_fd, initrd_fd,
					   cmdline_ptr, cmdline_len, flags);
	if (ret)
		goto out_free_image;

	ret = sanity_check_segment_list(image);
	if (ret)
		goto out_free_post_load_bufs;

	ret = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_post_load_bufs;
	}

	if (!kexec_on_panic) {
		image->swap_page = kimage_alloc_control_pages(image, 0);
		if (!image->swap_page) {
			pr_err("Could not allocate swap buffer\n");
			goto out_free_control_pages;
		}
	}

	*rimage = image;
	return 0;
out_free_control_pages:
	kimage_free_page_list(&image->control_pages);
out_free_post_load_bufs:
	kimage_file_post_load_cleanup(image);
out_free_image:
	kfree(image);
	return ret;
}

SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd,
		unsigned long, cmdline_len, const char __user *, cmdline_ptr,
		unsigned long, flags)
{
	int ret = 0, i;
	struct kimage **dest_image, *image;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return -EPERM;

	/* Make sure we have a legal set of flags */
	if (flags != (flags & KEXEC_FILE_FLAGS))
		return -EINVAL;

	image = NULL;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	dest_image = &kexec_image;
	if (flags & KEXEC_FILE_ON_CRASH) {
		dest_image = &kexec_crash_image;
		if (kexec_crash_image)
			arch_kexec_unprotect_crashkres();
	}

	if (flags & KEXEC_FILE_UNLOAD)
		goto exchange;

	/*
	 * In case of crash, the new kernel gets loaded in the reserved
	 * region. It is the same memory where the old crash kernel might be
	 * loaded. Free any current crash dump kernel before we corrupt it.
	 */
	if (flags & KEXEC_FILE_ON_CRASH)
		kimage_free(xchg(&kexec_crash_image, NULL));

	ret = kimage_file_alloc_init(&image, kernel_fd, initrd_fd, cmdline_ptr,
				     cmdline_len, flags);
	if (ret)
		goto out;

	ret = machine_kexec_prepare(image);
	if (ret)
		goto out;

	/*
	 * Some architectures (like S390) may touch the crash memory before
	 * machine_kexec_prepare(), so we must copy vmcoreinfo data after it.
	 */
	ret = kimage_crash_copy_vmcoreinfo(image);
	if (ret)
		goto out;

	ret = kexec_calculate_store_digests(image);
	if (ret)
		goto out;

	for (i = 0; i < image->nr_segments; i++) {
		struct kexec_segment *ksegment;

		ksegment = &image->segment[i];
		pr_debug("Loading segment %d: buf=0x%p bufsz=0x%zx mem=0x%lx memsz=0x%zx\n",
			 i, ksegment->buf, ksegment->bufsz, ksegment->mem,
			 ksegment->memsz);

		ret = kimage_load_segment(image, &image->segment[i]);
		if (ret)
			goto out;
	}

	kimage_terminate(image);

	/*
	 * Free up any temporary buffers allocated which are not needed
	 * after image has been loaded
	 */
	kimage_file_post_load_cleanup(image);
exchange:
	image = xchg(dest_image, image);
out:
	if ((flags & KEXEC_FILE_ON_CRASH) && kexec_crash_image)
		arch_kexec_protect_crashkres();

	mutex_unlock(&kexec_mutex);
	kimage_free(image);
	return ret;
}
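
/*
 * Illustrative sketch (not part of this file): there is no libc wrapper for
 * this system call, so user space typically invokes it through syscall(2)
 * with the flags from <linux/kexec.h>. The command line length must include
 * the trailing NUL, as checked in kimage_file_prepare_segments() above:
 *
 *	int kfd = open("/boot/vmlinuz", O_RDONLY);
 *	int ifd = open("/boot/initrd.img", O_RDONLY);
 *	const char *cmdline = "root=/dev/sda1 ro";
 *
 *	if (syscall(__NR_kexec_file_load, kfd, ifd,
 *		    strlen(cmdline) + 1, cmdline, 0) < 0)
 *		perror("kexec_file_load");
 *
 * Passing KEXEC_FILE_ON_CRASH loads into the crash kernel region instead,
 * and KEXEC_FILE_UNLOAD drops a previously loaded image.
 */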

static int locate_mem_hole_top_down(unsigned long start, unsigned long end,
				    struct kexec_buf *kbuf)
{
	struct kimage *image = kbuf->image;
	unsigned long temp_start, temp_end;

	temp_end = min(end, kbuf->buf_max);
	temp_start = temp_end - kbuf->memsz;

	do {
		/* align down start */
		temp_start = temp_start & (~(kbuf->buf_align - 1));

		if (temp_start < start || temp_start < kbuf->buf_min)
			return 0;

		temp_end = temp_start + kbuf->memsz - 1;

		/*
		 * Make sure this does not conflict with any of existing
		 * segments
		 */
		if (kimage_is_destination_range(image, temp_start, temp_end)) {
			temp_start = temp_start - PAGE_SIZE;
			continue;
		}

		/* We found a suitable memory range */
		break;
	} while (1);

	/* If we are here, we found a suitable memory range */
	kbuf->mem = temp_start;

	/* Success, stop navigating through remaining System RAM ranges */
	return 1;
}

static int locate_mem_hole_bottom_up(unsigned long start, unsigned long end,
				     struct kexec_buf *kbuf)
{
	struct kimage *image = kbuf->image;
	unsigned long temp_start, temp_end;

	temp_start = max(start, kbuf->buf_min);

	do {
		temp_start = ALIGN(temp_start, kbuf->buf_align);
		temp_end = temp_start + kbuf->memsz - 1;

		if (temp_end > end || temp_end > kbuf->buf_max)
			return 0;

		/*
		 * Make sure this does not conflict with any of existing
		 * segments
		 */
		if (kimage_is_destination_range(image, temp_start, temp_end)) {
			temp_start = temp_start + PAGE_SIZE;
			continue;
		}

		/* We found a suitable memory range */
		break;
	} while (1);

	/* If we are here, we found a suitable memory range */
	kbuf->mem = temp_start;

	/* Success, stop navigating through remaining System RAM ranges */
	return 1;
}

static int locate_mem_hole_callback(u64 start, u64 end, void *arg)
{
	struct kexec_buf *kbuf = (struct kexec_buf *)arg;
	unsigned long sz = end - start + 1;

	/* Returning 0 will take to next memory range */
	if (sz < kbuf->memsz)
		return 0;

	if (end < kbuf->buf_min || start > kbuf->buf_max)
		return 0;

	/*
	 * Allocate memory top down within the RAM range. Otherwise allocate
	 * bottom up.
	 */
	if (kbuf->top_down)
		return locate_mem_hole_top_down(start, end, kbuf);
	return locate_mem_hole_bottom_up(start, end, kbuf);
}

/**
 * arch_kexec_walk_mem - call func(data) on free memory regions
 * @kbuf: Context info for the search. Also passed to @func.
 * @func: Function to call for each memory region.
 *
 * Return: The memory walk will stop when func returns a non-zero value
 * and that value will be returned. If all free regions are visited without
 * func returning non-zero, then zero will be returned.
 */
int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf,
			       int (*func)(u64, u64, void *))
{
	if (kbuf->image->type == KEXEC_TYPE_CRASH)
		return walk_iomem_res_desc(crashk_res.desc,
					   IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
					   crashk_res.start, crashk_res.end,
					   kbuf, func);
	else
		return walk_system_ram_res(0, ULONG_MAX, kbuf, func);
}

/**
 * kexec_locate_mem_hole - find free memory for the purgatory or the next kernel
 * @kbuf: Parameters for the memory search.
 *
 * On success, kbuf->mem will have the start address of the memory region found.
 *
 * Return: 0 on success, negative errno on error.
 */
int kexec_locate_mem_hole(struct kexec_buf *kbuf)
{
	int ret;

	ret = arch_kexec_walk_mem(kbuf, locate_mem_hole_callback);

	return ret == 1 ? 0 : -EADDRNOTAVAIL;
}

/**
 * kexec_add_buffer - place a buffer in a kexec segment
 * @kbuf: Buffer contents and memory parameters.
 *
 * This function assumes that kexec_mutex is held.
 * On successful return, @kbuf->mem will have the physical address of
 * the buffer in memory.
 *
 * Return: 0 on success, negative errno on error.
 */
int kexec_add_buffer(struct kexec_buf *kbuf)
{
	struct kexec_segment *ksegment;
	int ret;

	/* Currently adding segment this way is allowed only in file mode */
	if (!kbuf->image->file_mode)
		return -EINVAL;

	if (kbuf->image->nr_segments >= KEXEC_SEGMENT_MAX)
		return -EINVAL;

	/*
	 * Make sure we are not trying to add a buffer after allocating
	 * control pages. All segments need to be placed first before
	 * any control pages are allocated, as the control page allocation
	 * logic goes through the list of segments to make sure there are
	 * no destination overlaps.
	 */
	if (!list_empty(&kbuf->image->control_pages)) {
		WARN_ON(1);
		return -EINVAL;
	}

	/* Ensure minimum alignment needed for segments. */
	kbuf->memsz = ALIGN(kbuf->memsz, PAGE_SIZE);
	kbuf->buf_align = max(kbuf->buf_align, PAGE_SIZE);

	/* Walk the RAM ranges and allocate a suitable range for the buffer */
	ret = kexec_locate_mem_hole(kbuf);
	if (ret)
		return ret;

	/* Found a suitable memory range */
	ksegment = &kbuf->image->segment[kbuf->image->nr_segments];
	ksegment->kbuf = kbuf->buffer;
	ksegment->bufsz = kbuf->bufsz;
	ksegment->mem = kbuf->mem;
	ksegment->memsz = kbuf->memsz;
	kbuf->image->nr_segments++;
	return 0;
}
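
/*
 * Illustrative sketch (not part of this file): a file-mode loader fills a
 * struct kexec_buf on its stack and lets kexec_add_buffer() choose where the
 * segment will land, then reads the decision back from kbuf.mem. For example,
 * to place the initrd (initrd_load_addr below is the caller's own variable):
 *
 *	struct kexec_buf kbuf = { .image = image, .buf_align = PAGE_SIZE,
 *				  .buf_min = 0, .buf_max = ULONG_MAX,
 *				  .top_down = 0 };
 *	unsigned long initrd_load_addr;
 *
 *	kbuf.buffer = image->initrd_buf;
 *	kbuf.bufsz = kbuf.memsz = image->initrd_buf_len;
 *	ret = kexec_add_buffer(&kbuf);
 *	if (!ret)
 *		initrd_load_addr = kbuf.mem;
 */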

/* Calculate and store the digest of segments */
static int kexec_calculate_store_digests(struct kimage *image)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int ret = 0, i, j, zero_buf_sz, sha_region_sz;
	size_t desc_size, nullsz;
	char *digest;
	void *zero_buf;
	struct kexec_sha_region *sha_regions;
	struct purgatory_info *pi = &image->purgatory_info;

	zero_buf = __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT);
	zero_buf_sz = PAGE_SIZE;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm)) {
		ret = PTR_ERR(tfm);
		goto out;
	}

	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
	desc = kzalloc(desc_size, GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region);
	sha_regions = vzalloc(sha_region_sz);
	if (!sha_regions) {
		ret = -ENOMEM;
		goto out_free_desc;
	}

	desc->tfm = tfm;
	desc->flags = 0;

	ret = crypto_shash_init(desc);
	if (ret < 0)
		goto out_free_sha_regions;

	digest = kzalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
	if (!digest) {
		ret = -ENOMEM;
		goto out_free_sha_regions;
	}

	for (j = i = 0; i < image->nr_segments; i++) {
		struct kexec_segment *ksegment;

		ksegment = &image->segment[i];
		/*
		 * Skip purgatory as it will be modified once we put digest
		 * info in purgatory.
		 */
		if (ksegment->kbuf == pi->purgatory_buf)
			continue;

		ret = crypto_shash_update(desc, ksegment->kbuf,
					  ksegment->bufsz);
		if (ret)
			break;

		/*
		 * Assume rest of the buffer is filled with zero and
		 * update digest accordingly.
		 */
		nullsz = ksegment->memsz - ksegment->bufsz;
		while (nullsz) {
			unsigned long bytes = nullsz;

			if (bytes > zero_buf_sz)
				bytes = zero_buf_sz;
			ret = crypto_shash_update(desc, zero_buf, bytes);
			if (ret)
				break;
			nullsz -= bytes;
		}

		if (ret)
			break;

		sha_regions[j].start = ksegment->mem;
		sha_regions[j].len = ksegment->memsz;
		j++;
	}

	if (!ret) {
		ret = crypto_shash_final(desc, digest);
		if (ret)
			goto out_free_digest;
		ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha_regions",
						     sha_regions, sha_region_sz, 0);
		if (ret)
			goto out_free_digest;

		ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha256_digest",
						     digest, SHA256_DIGEST_SIZE, 0);
		if (ret)
			goto out_free_digest;
	}

out_free_digest:
	kfree(digest);
out_free_sha_regions:
	vfree(sha_regions);
out_free_desc:
	kfree(desc);
out_free_tfm:
	kfree(tfm);
out:
	return ret;
}
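
/*
 * Illustrative sketch (not part of this file): the two symbols written above
 * are consumed by the purgatory binary before it jumps into the new kernel.
 * Purgatory recomputes SHA-256 over each recorded region and compares the
 * result with the stored digest, roughly along these lines (helper names are
 * those of the architecture's own purgatory code and may differ):
 *
 *	struct sha256_state sctx;
 *	u8 digest[SHA256_DIGEST_SIZE];
 *	struct kexec_sha_region *ptr;
 *
 *	sha256_init(&sctx);
 *	for (ptr = purgatory_sha_regions;
 *	     ptr < purgatory_sha_regions + KEXEC_SEGMENT_MAX; ptr++)
 *		sha256_update(&sctx, (u8 *)ptr->start, ptr->len);
 *	sha256_final(&sctx, digest);
 *
 *	if (memcmp(digest, purgatory_sha256_digest, sizeof(digest)))
 *		return 1;	(refuse to boot a corrupted image)
 *
 * This only shows the idea of the check, not the exact per-arch code.
 */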

/* Actually load purgatory. Lots of code taken from kexec-tools */
static int __kexec_load_purgatory(struct kimage *image, unsigned long min,
				  unsigned long max, int top_down)
{
	struct purgatory_info *pi = &image->purgatory_info;
	unsigned long align, bss_align, bss_sz, bss_pad;
	unsigned long entry, load_addr, curr_load_addr, bss_addr, offset;
	unsigned char *buf_addr, *src;
	int i, ret = 0, entry_sidx = -1;
	const Elf_Shdr *sechdrs_c;
	Elf_Shdr *sechdrs = NULL;
	struct kexec_buf kbuf = { .image = image, .bufsz = 0, .buf_align = 1,
				  .buf_min = min, .buf_max = max,
				  .top_down = top_down };

	/*
	 * sechdrs_c points to section headers in purgatory and is read
	 * only. No modifications allowed.
	 */
	sechdrs_c = (void *)pi->ehdr + pi->ehdr->e_shoff;

	/*
	 * We can not modify sechdrs_c[] and its fields. It is read only.
	 * Copy it over to a local copy where one can store some temporary
	 * data and free it at the end. We need to modify ->sh_addr and
	 * ->sh_offset fields to keep track of permanent and temporary
	 * locations of sections.
	 */
	sechdrs = vzalloc(pi->ehdr->e_shnum * sizeof(Elf_Shdr));
	if (!sechdrs)
		return -ENOMEM;

	memcpy(sechdrs, sechdrs_c, pi->ehdr->e_shnum * sizeof(Elf_Shdr));

	/*
	 * We end up with multiple copies of the sections. The first copy is
	 * the one embedded in the kernel in a read-only section. Some of
	 * these sections will be copied to a temporary buffer and relocated.
	 * These sections will finally be copied to their final destination
	 * at segment load time.
	 *
	 * Use ->sh_offset to reflect section address in memory. It will
	 * point to original read only copy if section is not allocatable.
	 * Otherwise it will point to temporary copy which will be relocated.
	 *
	 * Use ->sh_addr to contain final address of the section where it
	 * will go during execution time.
	 */
	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (sechdrs[i].sh_type == SHT_NOBITS)
			continue;

		sechdrs[i].sh_offset = (unsigned long)pi->ehdr +
						sechdrs[i].sh_offset;
	}

	/*
	 * Identify entry point section and make entry relative to section
	 * start.
	 */
	entry = pi->ehdr->e_entry;
	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;

		if (!(sechdrs[i].sh_flags & SHF_EXECINSTR))
			continue;

		/* Make entry section relative */
		if (sechdrs[i].sh_addr <= pi->ehdr->e_entry &&
		    ((sechdrs[i].sh_addr + sechdrs[i].sh_size) >
		     pi->ehdr->e_entry)) {
			entry_sidx = i;
			entry -= sechdrs[i].sh_addr;
			break;
		}
	}

	/* Determine how much memory is needed to load relocatable object. */
	bss_align = 1;
	bss_sz = 0;

	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;

		align = sechdrs[i].sh_addralign;
		if (sechdrs[i].sh_type != SHT_NOBITS) {
			if (kbuf.buf_align < align)
				kbuf.buf_align = align;
			kbuf.bufsz = ALIGN(kbuf.bufsz, align);
			kbuf.bufsz += sechdrs[i].sh_size;
		} else {
			/* bss section */
			if (bss_align < align)
				bss_align = align;
			bss_sz = ALIGN(bss_sz, align);
			bss_sz += sechdrs[i].sh_size;
		}
	}

	/* Determine the bss padding required to align bss properly */
	bss_pad = 0;
	if (kbuf.bufsz & (bss_align - 1))
		bss_pad = bss_align - (kbuf.bufsz & (bss_align - 1));

	kbuf.memsz = kbuf.bufsz + bss_pad + bss_sz;

	/* Allocate buffer for purgatory */
	kbuf.buffer = vzalloc(kbuf.bufsz);
	if (!kbuf.buffer) {
		ret = -ENOMEM;
		goto out;
	}

	if (kbuf.buf_align < bss_align)
		kbuf.buf_align = bss_align;

	/* Add buffer to segment list */
	ret = kexec_add_buffer(&kbuf);
	if (ret)
		goto out;
	pi->purgatory_load_addr = kbuf.mem;

	/* Load SHF_ALLOC sections */
	buf_addr = kbuf.buffer;
	load_addr = curr_load_addr = pi->purgatory_load_addr;
	bss_addr = load_addr + kbuf.bufsz + bss_pad;

	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;

		align = sechdrs[i].sh_addralign;
		if (sechdrs[i].sh_type != SHT_NOBITS) {
			curr_load_addr = ALIGN(curr_load_addr, align);
			offset = curr_load_addr - load_addr;
			/* We already modified ->sh_offset to keep src addr */
			src = (char *) sechdrs[i].sh_offset;
			memcpy(buf_addr + offset, src, sechdrs[i].sh_size);

			/* Store load address and source address of section */
			sechdrs[i].sh_addr = curr_load_addr;

			/*
			 * This section got copied to temporary buffer. Update
			 * ->sh_offset accordingly.
			 */
			sechdrs[i].sh_offset = (unsigned long)(buf_addr + offset);

			/* Advance to the next address */
			curr_load_addr += sechdrs[i].sh_size;
		} else {
			bss_addr = ALIGN(bss_addr, align);
			sechdrs[i].sh_addr = bss_addr;
			bss_addr += sechdrs[i].sh_size;
		}
	}

	/* Update entry point based on load address of text section */
	if (entry_sidx >= 0)
		entry += sechdrs[entry_sidx].sh_addr;

	/* Make kernel jump to purgatory after shutdown */
	image->start = entry;

	/* Used later to get/set symbol values */
	pi->sechdrs = sechdrs;

	/*
	 * Used later to identify which section is purgatory and skip it
	 * from checksumming.
	 */
	pi->purgatory_buf = kbuf.buffer;
	return ret;
out:
	vfree(sechdrs);
	vfree(kbuf.buffer);
	return ret;
}

static int kexec_apply_relocations(struct kimage *image)
{
	int i, ret;
	struct purgatory_info *pi = &image->purgatory_info;
	Elf_Shdr *sechdrs = pi->sechdrs;

	/* Apply relocations */
	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		Elf_Shdr *section, *symtab;

		if (sechdrs[i].sh_type != SHT_RELA &&
		    sechdrs[i].sh_type != SHT_REL)
			continue;

		/*
		 * For sections of type SHT_RELA/SHT_REL,
		 * ->sh_link contains the section header index of the
		 * associated symbol table, and ->sh_info contains the section
		 * header index of the section to which relocations apply.
		 */
		if (sechdrs[i].sh_info >= pi->ehdr->e_shnum ||
		    sechdrs[i].sh_link >= pi->ehdr->e_shnum)
			return -ENOEXEC;

		section = &sechdrs[sechdrs[i].sh_info];
		symtab = &sechdrs[sechdrs[i].sh_link];

		if (!(section->sh_flags & SHF_ALLOC))
			continue;

		/*
		 * symtab->sh_link contains the section header index of the
		 * associated string table.
		 */
		if (symtab->sh_link >= pi->ehdr->e_shnum)
			/* Invalid section number? */
			continue;

		/*
		 * Respective architecture needs to provide support for applying
		 * relocations of type SHT_RELA/SHT_REL.
		 */
		if (sechdrs[i].sh_type == SHT_RELA)
			ret = arch_kexec_apply_relocations_add(pi->ehdr,
							       sechdrs, i);
		else if (sechdrs[i].sh_type == SHT_REL)
			ret = arch_kexec_apply_relocations(pi->ehdr,
							   sechdrs, i);
		if (ret)
			return ret;
	}

	return 0;
}

/* Load relocatable purgatory object and relocate it appropriately */
int kexec_load_purgatory(struct kimage *image, unsigned long min,
			 unsigned long max, int top_down,
			 unsigned long *load_addr)
{
	struct purgatory_info *pi = &image->purgatory_info;
	int ret;

	if (kexec_purgatory_size <= 0)
		return -EINVAL;

	if (kexec_purgatory_size < sizeof(Elf_Ehdr))
		return -ENOEXEC;

	pi->ehdr = (Elf_Ehdr *)kexec_purgatory;

	if (memcmp(pi->ehdr->e_ident, ELFMAG, SELFMAG) != 0
	    || pi->ehdr->e_type != ET_REL
	    || !elf_check_arch(pi->ehdr)
	    || pi->ehdr->e_shentsize != sizeof(Elf_Shdr))
		return -ENOEXEC;

	if (pi->ehdr->e_shoff >= kexec_purgatory_size
	    || (pi->ehdr->e_shnum * sizeof(Elf_Shdr) >
		kexec_purgatory_size - pi->ehdr->e_shoff))
		return -ENOEXEC;

	ret = __kexec_load_purgatory(image, min, max, top_down);
	if (ret)
		return ret;

	ret = kexec_apply_relocations(image);
	if (ret)
		goto out;

	*load_addr = pi->purgatory_load_addr;
	return 0;
out:
	vfree(pi->sechdrs);
	pi->sechdrs = NULL;

	vfree(pi->purgatory_buf);
	pi->purgatory_buf = NULL;
	return ret;
}
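
/*
 * Illustrative sketch (not part of this file): an architecture's file loader
 * calls kexec_load_purgatory() while building its segment list and then
 * patches purgatory symbols with the addresses it just decided on. Roughly,
 * in the style of the x86 bzImage64 loader (MIN_PURGATORY_ADDR is a
 * loader-specific lower bound; exact arguments vary per architecture):
 *
 *	unsigned long purgatory_load_addr;
 *	int ret;
 *
 *	ret = kexec_load_purgatory(image, MIN_PURGATORY_ADDR, ULONG_MAX, 1,
 *				   &purgatory_load_addr);
 *	if (ret)
 *		return ERR_PTR(ret);
 *	pr_debug("Loaded purgatory at 0x%lx\n", purgatory_load_addr);
 */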

static Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi,
					    const char *name)
{
	Elf_Sym *syms;
	Elf_Shdr *sechdrs;
	Elf_Ehdr *ehdr;
	int i, k;
	const char *strtab;

	if (!pi->sechdrs || !pi->ehdr)
		return NULL;

	sechdrs = pi->sechdrs;
	ehdr = pi->ehdr;

	for (i = 0; i < ehdr->e_shnum; i++) {
		if (sechdrs[i].sh_type != SHT_SYMTAB)
			continue;

		if (sechdrs[i].sh_link >= ehdr->e_shnum)
			/* Invalid strtab section number */
			continue;
		strtab = (char *)sechdrs[sechdrs[i].sh_link].sh_offset;
		syms = (Elf_Sym *)sechdrs[i].sh_offset;

		/* Go through symbols for a match */
		for (k = 0; k < sechdrs[i].sh_size/sizeof(Elf_Sym); k++) {
			if (ELF_ST_BIND(syms[k].st_info) != STB_GLOBAL)
				continue;

			if (strcmp(strtab + syms[k].st_name, name) != 0)
				continue;

			if (syms[k].st_shndx == SHN_UNDEF ||
			    syms[k].st_shndx >= ehdr->e_shnum) {
				pr_debug("Symbol: %s has bad section index %d.\n",
						name, syms[k].st_shndx);
				return NULL;
			}

			/* Found the symbol we are looking for */
			return &syms[k];
		}
	}

	return NULL;
}

void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name)
{
	struct purgatory_info *pi = &image->purgatory_info;
	Elf_Sym *sym;
	Elf_Shdr *sechdr;

	sym = kexec_purgatory_find_symbol(pi, name);
	if (!sym)
		return ERR_PTR(-EINVAL);

	sechdr = &pi->sechdrs[sym->st_shndx];

	/*
	 * Returns the address where symbol will finally be loaded after
	 * kexec_load_segment()
	 */
	return (void *)(sechdr->sh_addr + sym->st_value);
}

/*
 * Get or set value of a symbol. If "get_value" is true, symbol value is
 * returned in buf; otherwise symbol value is set based on value in buf.
 */
int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
				   void *buf, unsigned int size, bool get_value)
{
	Elf_Sym *sym;
	Elf_Shdr *sechdrs;
	struct purgatory_info *pi = &image->purgatory_info;
	char *sym_buf;

	sym = kexec_purgatory_find_symbol(pi, name);
	if (!sym)
		return -EINVAL;

	if (sym->st_size != size) {
		pr_err("symbol %s size mismatch: expected %lu actual %u\n",
		       name, (unsigned long)sym->st_size, size);
		return -EINVAL;
	}

	sechdrs = pi->sechdrs;

	if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
		pr_err("symbol %s is in a bss section. Cannot %s\n", name,
		       get_value ? "get" : "set");
		return -EINVAL;
	}

	sym_buf = (unsigned char *)sechdrs[sym->st_shndx].sh_offset +
					sym->st_value;

	if (get_value)
		memcpy((void *)buf, sym_buf, size);
	else
		memcpy((void *)sym_buf, buf, size);

	return 0;
}
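
/*
 * Illustrative sketch (not part of this file): kexec_calculate_store_digests()
 * above uses the setter form of this call to patch the computed checksum data
 * into the purgatory image, and an arch loader can use the same interface to
 * hand purgatory other values it needs at boot, e.g.:
 *
 *	ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha256_digest",
 *					     digest, SHA256_DIGEST_SIZE, 0);
 *
 *	ret = kexec_purgatory_get_set_symbol(image, "entry64_addr",
 *					     &kernel_entry,
 *					     sizeof(kernel_entry), 0);
 *
 * "entry64_addr" and kernel_entry are hypothetical here; the real symbol
 * names are defined by the architecture's purgatory sources.
 */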