efi.c

/*
 * Extensible Firmware Interface
 *
 * Based on Extensible Firmware Interface Specification version 2.4
 *
 * Copyright (C) 2013, 2014 Linaro Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/init.h>

#include <asm/efi.h>

/*
 * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
 * executable; everything else can be mapped with the XN bits
 * set. Also take the new (optional) RO/XP bits into account.
 */
static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
{
	u64 attr = md->attribute;
	u32 type = md->type;

	if (type == EFI_MEMORY_MAPPED_IO)
		return PROT_DEVICE_nGnRE;

	if (WARN_ONCE(!PAGE_ALIGNED(md->phys_addr),
		      "UEFI Runtime regions are not aligned to 64 KB -- buggy firmware?"))
		/*
		 * If the region is not aligned to the page size of the OS, we
		 * cannot use strict permissions, since that would also affect
		 * the mapping attributes of the adjacent regions.
		 */
		return pgprot_val(PAGE_KERNEL_EXEC);

	/* R-- */
	if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
	    (EFI_MEMORY_XP | EFI_MEMORY_RO))
		return pgprot_val(PAGE_KERNEL_RO);

	/* R-X */
	if (attr & EFI_MEMORY_RO)
		return pgprot_val(PAGE_KERNEL_ROX);

	/* RW- */
	if (attr & EFI_MEMORY_XP || type != EFI_RUNTIME_SERVICES_CODE)
		return pgprot_val(PAGE_KERNEL);

	/* RWX */
	return pgprot_val(PAGE_KERNEL_EXEC);
}

/* we will fill this structure from the stub, so don't put it in .bss */
struct screen_info screen_info __section(.data);
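
/*
 * efi_create_mapping - map a single UEFI runtime region into @mm.
 *
 * Called once per memory descriptor when the runtime services virtual
 * map is installed. As a rough sketch (not a verbatim call site), the
 * generic ARM EFI setup code does something like:
 *
 *	for_each_efi_memory_desc(md)
 *		efi_create_mapping(&efi_mm, md);
 */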
int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
{
	pteval_t prot_val = create_mapping_protection(md);
	bool allow_block_mappings = (md->type != EFI_RUNTIME_SERVICES_CODE &&
				     md->type != EFI_RUNTIME_SERVICES_DATA);

	if (!PAGE_ALIGNED(md->phys_addr) ||
	    !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT)) {
		/*
		 * If the end address of this region is not aligned to page
		 * size, the mapping is rounded up, and may end up sharing a
		 * page frame with the next UEFI memory region. If we create
		 * a block entry now, we may need to split it again when mapping
		 * the next region, and support for that is going to be removed
		 * from the MMU routines. So avoid block mappings altogether in
		 * that case.
		 */
		allow_block_mappings = false;
	}

	create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
			   md->num_pages << EFI_PAGE_SHIFT,
			   __pgprot(prot_val | PTE_NG), allow_block_mappings);
	return 0;
}
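
/*
 * set_permissions - apply_to_page_range() callback; invoked once for
 * each pte in the region described by @data, tightening the entry
 * according to the region's RO/XP attributes.
 */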
static int __init set_permissions(pte_t *ptep, pgtable_t token,
				  unsigned long addr, void *data)
{
	efi_memory_desc_t *md = data;
	pte_t pte = *ptep;

	if (md->attribute & EFI_MEMORY_RO)
		pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	if (md->attribute & EFI_MEMORY_XP)
		pte = set_pte_bit(pte, __pgprot(PTE_PXN));
	set_pte(ptep, pte);
	return 0;
}
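
/*
 * efi_set_mapping_permissions - apply the (stricter) permissions from
 * the UEFI Memory Attributes table to a region that has already been
 * mapped by efi_create_mapping() above.
 */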
int __init efi_set_mapping_permissions(struct mm_struct *mm,
				       efi_memory_desc_t *md)
{
	BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE &&
	       md->type != EFI_RUNTIME_SERVICES_DATA);

	/*
	 * Calling apply_to_page_range() is only safe on regions that are
	 * guaranteed to be mapped down to pages. Since we are only called
	 * for regions that have been mapped using efi_create_mapping() above
	 * (and this is checked by the generic Memory Attributes table parsing
	 * routines), there is no need to check that again here.
	 */
	return apply_to_page_range(mm, md->virt_addr,
				   md->num_pages << EFI_PAGE_SHIFT,
				   set_permissions, md);
}
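
/*
 * arm64_dmi_init - registered as a core_initcall, which runs before the
 * arch_initcall level mentioned below, guaranteeing the required
 * ordering.
 */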
static int __init arm64_dmi_init(void)
{
	/*
	 * On arm64, DMI depends on UEFI, and dmi_scan_machine() needs to
	 * be called early because dmi_id_init(), which is an arch_initcall
	 * itself, depends on dmi_scan_machine() having been called already.
	 */
	dmi_scan_machine();
	if (dmi_available)
		dmi_set_dump_stack_arch_desc();
	return 0;
}
core_initcall(arm64_dmi_init);

/*
 * UpdateCapsule() depends on the system being shut down via
 * ResetSystem().
 */
bool efi_poweroff_required(void)
{
	return efi_enabled(EFI_RUNTIME_SERVICES);
}