firmware_class.c 43 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863
  1. /*
  2. * firmware_class.c - Multi purpose firmware loading support
  3. *
  4. * Copyright (c) 2003 Manuel Estrada Sainz
  5. *
  6. * Please see Documentation/firmware_class/ for more information.
  7. *
  8. */
  9. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  10. #include <linux/capability.h>
  11. #include <linux/device.h>
  12. #include <linux/module.h>
  13. #include <linux/init.h>
  14. #include <linux/timer.h>
  15. #include <linux/vmalloc.h>
  16. #include <linux/interrupt.h>
  17. #include <linux/bitops.h>
  18. #include <linux/mutex.h>
  19. #include <linux/workqueue.h>
  20. #include <linux/highmem.h>
  21. #include <linux/firmware.h>
  22. #include <linux/slab.h>
  23. #include <linux/sched.h>
  24. #include <linux/file.h>
  25. #include <linux/list.h>
  26. #include <linux/async.h>
  27. #include <linux/pm.h>
  28. #include <linux/suspend.h>
  29. #include <linux/syscore_ops.h>
  30. #include <linux/io.h>
  31. #include <linux/reboot.h>
  32. #include <generated/utsrelease.h>
  33. #include "base.h"
  34. MODULE_AUTHOR("Manuel Estrada Sainz");
  35. MODULE_DESCRIPTION("Multi purpose firmware loading support");
  36. MODULE_LICENSE("GPL");
  37. /* Builtin firmware support */
  38. #ifdef CONFIG_FW_LOADER
  39. extern struct builtin_fw __start_builtin_fw[];
  40. extern struct builtin_fw __end_builtin_fw[];
  41. static bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
  42. {
  43. struct builtin_fw *b_fw;
  44. for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
  45. if (strcmp(name, b_fw->name) == 0) {
  46. fw->size = b_fw->size;
  47. fw->data = b_fw->data;
  48. return true;
  49. }
  50. }
  51. return false;
  52. }
  53. static bool fw_is_builtin_firmware(const struct firmware *fw)
  54. {
  55. struct builtin_fw *b_fw;
  56. for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
  57. if (fw->data == b_fw->data)
  58. return true;
  59. return false;
  60. }
  61. #else /* Module case - no builtin firmware support */
/* Module build: there is no builtin firmware section, so always miss. */
static inline bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
{
	return false;
}

/* Module build: nothing can be builtin firmware. */
static inline bool fw_is_builtin_firmware(const struct firmware *fw)
{
	return false;
}
  70. #endif
/* Bit positions in firmware_buf.status tracking a load's lifecycle. */
enum {
	FW_STATUS_LOADING,	/* user space is currently writing the image */
	FW_STATUS_DONE,		/* load concluded successfully */
	FW_STATUS_ABORT,	/* load failed or was cancelled */
};
  76. static int loading_timeout = 60; /* In seconds */
  77. static inline long firmware_loading_timeout(void)
  78. {
  79. return loading_timeout > 0 ? msecs_to_jiffies(loading_timeout * 1000) :
  80. MAX_SCHEDULE_TIMEOUT;
  81. }
/* Global registry of live firmware buffers (and, with PM, cached names). */
struct firmware_cache {
	/* firmware_buf instance will be added into the below list */
	spinlock_t lock;
	struct list_head head;
	int state;		/* FW_LOADER_NO_CACHE / FW_LOADER_START_CACHE */
#ifdef CONFIG_PM_SLEEP
	/*
	 * Names of firmware images which have been cached successfully
	 * will be added into the below list so that device uncache
	 * helper can trace which firmware images have been cached
	 * before.
	 */
	spinlock_t name_lock;
	struct list_head fw_names;
	struct delayed_work work;	/* delayed uncache after resume */
	struct notifier_block pm_notify;
#endif
};
/*
 * One loadable firmware image. Refcounted and shared between concurrent
 * requests for the same fw_id via the firmware_cache list.
 */
struct firmware_buf {
	struct kref ref;		/* lifetime; released via fw_free_buf() */
	struct list_head list;		/* link in firmware_cache.head */
	struct completion completion;	/* signalled on DONE or ABORT */
	struct firmware_cache *fwc;	/* owning cache; NULL if uncached */
	unsigned long status;		/* FW_STATUS_* bits */
	void *data;
	size_t size;
	phys_addr_t dest_addr;		/* optional fixed physical target */
	size_t dest_size;		/* size of the window at dest_addr */
	void * (*map_fw_mem)(phys_addr_t phys, size_t size);
	void (*unmap_fw_mem)(void *virt);
#ifdef CONFIG_FW_LOADER_USER_HELPER
	bool is_paged_buf;		/* data is a vmap of pages[] */
	struct page **pages;
	int nr_pages;
	int page_array_size;
	struct list_head pending_list;	/* link in pending_fw_head */
#endif
	char fw_id[];			/* firmware name (flexible array) */
};
/* Name of a firmware image recorded for re-caching around suspend. */
struct fw_cache_entry {
	struct list_head list;
	char name[];
};

/* devres payload remembering which firmware a device has loaded. */
struct fw_name_devm {
	unsigned long magic;	/* set to &fw_cache to tag our entries */
	char name[];
};
/* Everything describing one firmware request (sync or nowait). */
struct fw_desc {
	struct work_struct work;	/* worker for the nowait path */
	const struct firmware **firmware_p;
	const char *name;
	struct device *device;
	bool uevent;			/* notify user space helper via uevent */
	bool nowait;
	bool nocache;			/* skip PM caching of this image */
	phys_addr_t dest_addr;		/* optional fixed physical target */
	size_t dest_size;
	void * (*map_fw_mem)(phys_addr_t phys, size_t size);
	void (*unmap_fw_mem)(void *virt);
	struct module *module;
	void *context;			/* passed back to cont() */
	void (*cont)(const struct firmware *fw, void *context);
};
  145. #define to_fwbuf(d) container_of(d, struct firmware_buf, ref)
  146. #define FW_LOADER_NO_CACHE 0
  147. #define FW_LOADER_START_CACHE 1
  148. static int fw_cache_piggyback_on_request(const char *name);
  149. /* fw_lock could be moved to 'struct firmware_priv' but since it is just
  150. * guarding for corner cases a global lock should be OK */
  151. static DEFINE_MUTEX(fw_lock);
  152. static struct firmware_cache fw_cache;
  153. static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
  154. struct firmware_cache *fwc)
  155. {
  156. struct firmware_buf *buf;
  157. buf = kzalloc(sizeof(*buf) + strlen(fw_name) + 1 , GFP_ATOMIC);
  158. if (!buf)
  159. return buf;
  160. kref_init(&buf->ref);
  161. strcpy(buf->fw_id, fw_name);
  162. buf->fwc = fwc;
  163. init_completion(&buf->completion);
  164. #ifdef CONFIG_FW_LOADER_USER_HELPER
  165. INIT_LIST_HEAD(&buf->pending_list);
  166. #endif
  167. pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);
  168. return buf;
  169. }
  170. static struct firmware_buf *__fw_lookup_buf(const char *fw_name)
  171. {
  172. struct firmware_buf *tmp;
  173. struct firmware_cache *fwc = &fw_cache;
  174. list_for_each_entry(tmp, &fwc->head, list)
  175. if (!strcmp(tmp->fw_id, fw_name))
  176. return tmp;
  177. return NULL;
  178. }
  179. static int fw_lookup_and_allocate_buf(const char *fw_name,
  180. struct firmware_cache *fwc,
  181. struct firmware_buf **buf)
  182. {
  183. struct firmware_buf *tmp;
  184. spin_lock(&fwc->lock);
  185. tmp = __fw_lookup_buf(fw_name);
  186. if (tmp) {
  187. kref_get(&tmp->ref);
  188. spin_unlock(&fwc->lock);
  189. *buf = tmp;
  190. return 1;
  191. }
  192. tmp = __allocate_fw_buf(fw_name, fwc);
  193. if (tmp)
  194. list_add(&tmp->list, &fwc->head);
  195. spin_unlock(&fwc->lock);
  196. *buf = tmp;
  197. return tmp ? 0 : -ENOMEM;
  198. }
  199. static struct firmware_buf *fw_lookup_buf(const char *fw_name)
  200. {
  201. struct firmware_buf *tmp;
  202. struct firmware_cache *fwc = &fw_cache;
  203. spin_lock(&fwc->lock);
  204. tmp = __fw_lookup_buf(fw_name);
  205. spin_unlock(&fwc->lock);
  206. return tmp;
  207. }
/*
 * kref release callback: unlink the buffer from its cache and free it.
 * Entered with fwc->lock held (taken by fw_free_buf()); drops the lock
 * before freeing, as advertised by the __releases() annotation.
 */
static void __fw_free_buf(struct kref *ref)
	__releases(&fwc->lock)
{
	struct firmware_buf *buf = to_fwbuf(ref);
	struct firmware_cache *fwc = buf->fwc;

	pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
		 __func__, buf->fw_id, buf, buf->data,
		 (unsigned int)buf->size);

	list_del(&buf->list);
	spin_unlock(&fwc->lock);

#ifdef CONFIG_FW_LOADER_USER_HELPER
	if (buf->is_paged_buf) {
		int i;

		/* paged buffers own their pages: unmap, then free each one */
		vunmap(buf->data);
		for (i = 0; i < buf->nr_pages; i++)
			__free_page(buf->pages[i]);
		vfree(buf->pages);
	} else
#endif
		vfree(buf->data);
	kfree(buf);
}
/*
 * Drop one reference on @buf. The cache lock is taken here and, if this
 * was the last reference, released inside __fw_free_buf(); otherwise we
 * unlock ourselves. Buffers without a cache (fwc == NULL) are freed
 * directly.
 */
static void fw_free_buf(struct firmware_buf *buf)
{
	struct firmware_cache *fwc = buf->fwc;

	if (!fwc) {
		kfree(buf);
		return;
	}

	spin_lock(&fwc->lock);
	if (!kref_put(&buf->ref, __fw_free_buf))
		spin_unlock(&fwc->lock);
}
  241. /* direct firmware loading support */
static char fw_path_para[256];

/* Search order: user-supplied path (if any) first, then the defaults. */
static const char * const fw_path[] = {
	fw_path_para,
	"/lib/firmware/updates/" UTS_RELEASE,
	"/lib/firmware/updates",
	"/lib/firmware/" UTS_RELEASE,
	"/lib/firmware"
};

/*
 * Typical usage is that passing 'firmware_class.path=$CUSTOMIZED_PATH'
 * from kernel command line because firmware_class is generally built in
 * kernel instead of module.
 */
module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
  257. /* Don't inline this: 'struct kstat' is biggish */
  258. static noinline_for_stack int fw_file_size(struct file *file)
  259. {
  260. struct kstat st;
  261. if (vfs_getattr(file->f_path.mnt, file->f_path.dentry, &st))
  262. return -1;
  263. if (!S_ISREG(st.mode))
  264. return -1;
  265. if (st.size != (int)st.size)
  266. return -1;
  267. return st.size;
  268. }
/*
 * Read the whole of @file into @fw_buf. When a physical destination
 * (dest_addr) is set, the image is copied straight into that region via
 * map_fw_mem()/unmap_fw_mem(); otherwise a vmalloc buffer is used.
 * Returns true on success.
 */
static bool fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf)
{
	int size;
	char *buf;

	size = fw_file_size(file);
	if (size <= 0)
		return false;
	/* refuse images larger than the fixed destination window */
	if (fw_buf->dest_size > 0 && fw_buf->dest_size < size)
		return false;
	if (fw_buf->dest_addr)
		buf = fw_buf->map_fw_mem(fw_buf->dest_addr,
					 fw_buf->dest_size);
	else
		buf = vmalloc(size);
	if (!buf)
		return false;
	if (kernel_read(file, 0, buf, size) != size) {
		if (fw_buf->dest_addr)
			fw_buf->unmap_fw_mem(buf);
		else
			vfree(buf);
		return false;
	}
	fw_buf->data = buf;
	fw_buf->size = size;
	/*
	 * NOTE(review): in the dest_addr case the mapping is torn down
	 * right after data is set, leaving fw_buf->data pointing at an
	 * unmapped virtual address -- presumably consumers of direct loads
	 * use dest_addr instead of data; confirm before dereferencing.
	 */
	if (fw_buf->dest_addr)
		fw_buf->unmap_fw_mem(buf);
	return true;
}
  298. static bool fw_get_filesystem_firmware(struct device *device,
  299. struct firmware_buf *buf,
  300. phys_addr_t dest_addr, size_t dest_size)
  301. {
  302. int i;
  303. bool success = false;
  304. char *path = __getname();
  305. for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
  306. struct file *file;
  307. /* skip the unset customized path */
  308. if (!fw_path[i][0])
  309. continue;
  310. snprintf(path, PATH_MAX, "%s/%s", fw_path[i], buf->fw_id);
  311. file = filp_open(path, O_RDONLY, 0);
  312. if (IS_ERR(file))
  313. continue;
  314. success = fw_read_file_contents(file, buf);
  315. fput(file);
  316. if (success)
  317. break;
  318. }
  319. __putname(path);
  320. if (success) {
  321. dev_dbg(device, "firmware: direct-loading firmware %s\n",
  322. buf->fw_id);
  323. mutex_lock(&fw_lock);
  324. set_bit(FW_STATUS_DONE, &buf->status);
  325. complete_all(&buf->completion);
  326. mutex_unlock(&fw_lock);
  327. }
  328. return success;
  329. }
  330. /* firmware holds the ownership of pages */
  331. static void firmware_free_data(const struct firmware *fw)
  332. {
  333. /* Loaded directly? */
  334. if (!fw->priv) {
  335. vfree(fw->data);
  336. return;
  337. }
  338. fw_free_buf(fw->priv);
  339. }
/* store the pages buffer info firmware from buf */
/* Publish @buf's data/size (and pages, with the user helper) into @fw. */
static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw)
{
	fw->priv = buf;
#ifdef CONFIG_FW_LOADER_USER_HELPER
	fw->pages = buf->pages;
#endif
	fw->size = buf->size;
	fw->data = buf->data;

	pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
		 __func__, buf->fw_id, buf, buf->data,
		 (unsigned int)buf->size);
}
  353. #ifdef CONFIG_PM_SLEEP
/*
 * devres release for fw_name_devm: the devres core frees the allocation
 * itself, so there is nothing to do beyond a trace when the magic
 * identifies the entry as ours.
 */
static void fw_name_devm_release(struct device *dev, void *res)
{
	struct fw_name_devm *fwn = res;

	if (fwn->magic == (unsigned long)&fw_cache)
		pr_debug("%s: fw_name-%s devm-%p released\n",
			 __func__, fwn->name, res);
}
  361. static int fw_devm_match(struct device *dev, void *res,
  362. void *match_data)
  363. {
  364. struct fw_name_devm *fwn = res;
  365. return (fwn->magic == (unsigned long)&fw_cache) &&
  366. !strcmp(fwn->name, match_data);
  367. }
  368. static struct fw_name_devm *fw_find_devm_name(struct device *dev,
  369. const char *name)
  370. {
  371. struct fw_name_devm *fwn;
  372. fwn = devres_find(dev, fw_name_devm_release,
  373. fw_devm_match, (void *)name);
  374. return fwn;
  375. }
  376. /* add firmware name into devres list */
  377. static int fw_add_devm_name(struct device *dev, const char *name)
  378. {
  379. struct fw_name_devm *fwn;
  380. fwn = fw_find_devm_name(dev, name);
  381. if (fwn)
  382. return 1;
  383. fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm) +
  384. strlen(name) + 1, GFP_KERNEL);
  385. if (!fwn)
  386. return -ENOMEM;
  387. fwn->magic = (unsigned long)&fw_cache;
  388. strcpy(fwn->name, name);
  389. devres_add(dev, fwn);
  390. return 0;
  391. }
  392. #else
/* Without CONFIG_PM_SLEEP there is no cache to track names for: no-op. */
static int fw_add_devm_name(struct device *dev, const char *name)
{
	return 0;
}
  397. #endif
  398. /*
  399. * user-mode helper code
  400. */
  401. #ifdef CONFIG_FW_LOADER_USER_HELPER
/* Per-request sysfs device state for the user-mode helper path. */
struct firmware_priv {
	struct delayed_work timeout_work;	/* aborts the load on timeout */
	bool nowait;				/* request came via nowait API */
	struct device dev;			/* sysfs presence under class */
	struct firmware_buf *buf;		/* NULL once aborted or done */
	struct firmware *fw;
};
/* Map the embedded struct device back to its enclosing firmware_priv. */
static struct firmware_priv *to_firmware_priv(struct device *dev)
{
	return container_of(dev, struct firmware_priv, dev);
}
/*
 * Abort an in-flight user-helper load: unlink from the pending list,
 * set the ABORT bit and wake all waiters. Caller holds fw_lock.
 * A load that already completed (DONE) is left untouched.
 */
static void __fw_load_abort(struct firmware_buf *buf)
{
	/*
	 * There is a small window in which user can write to 'loading'
	 * between loading done and disappearance of 'loading'
	 */
	if (test_bit(FW_STATUS_DONE, &buf->status))
		return;

	list_del_init(&buf->pending_list);
	set_bit(FW_STATUS_ABORT, &buf->status);
	complete_all(&buf->completion);
}
/* Abort the load and detach the buffer so later sysfs writes are no-ops. */
static void fw_load_abort(struct firmware_priv *fw_priv)
{
	struct firmware_buf *buf = fw_priv->buf;

	__fw_load_abort(buf);

	/* avoid user action after loading abort */
	fw_priv->buf = NULL;
}
  432. #define is_fw_load_aborted(buf) \
  433. test_bit(FW_STATUS_ABORT, &(buf)->status)
  434. static LIST_HEAD(pending_fw_head);
  435. /* reboot notifier for avoid deadlock with usermode_lock */
  436. static int fw_shutdown_notify(struct notifier_block *unused1,
  437. unsigned long unused2, void *unused3)
  438. {
  439. mutex_lock(&fw_lock);
  440. while (!list_empty(&pending_fw_head))
  441. __fw_load_abort(list_first_entry(&pending_fw_head,
  442. struct firmware_buf,
  443. pending_list));
  444. mutex_unlock(&fw_lock);
  445. return NOTIFY_DONE;
  446. }
  447. static struct notifier_block fw_shutdown_nb = {
  448. .notifier_call = fw_shutdown_notify,
  449. };
/* sysfs class attribute read: current load timeout in seconds. */
static ssize_t firmware_timeout_show(struct class *class,
				     struct class_attribute *attr,
				     char *buf)
{
	return sprintf(buf, "%d\n", loading_timeout);
}
/**
 * firmware_timeout_store - set number of seconds to wait for firmware
 * @class: device class pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for timeout value
 * @count: number of bytes in @buf
 *
 * Sets the number of seconds to wait for the firmware. Once
 * this expires an error will be returned to the driver and no
 * firmware will be provided.
 *
 * Note: zero means 'wait forever'.
 **/
static ssize_t firmware_timeout_store(struct class *class,
				      struct class_attribute *attr,
				      const char *buf, size_t count)
{
	/* simple_strtol: unparsable input yields 0, i.e. 'wait forever' */
	loading_timeout = simple_strtol(buf, NULL, 10);
	if (loading_timeout < 0)
		loading_timeout = 0;

	return count;
}
  478. static struct class_attribute firmware_class_attrs[] = {
  479. __ATTR(timeout, S_IWUSR | S_IRUGO,
  480. firmware_timeout_show, firmware_timeout_store),
  481. __ATTR_NULL
  482. };
/*
 * Device release: free the per-request state and drop the module
 * reference held for this device's lifetime.
 */
static void fw_dev_release(struct device *dev)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);

	kfree(fw_priv);

	module_put(THIS_MODULE);
}
  489. static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_env *env)
  490. {
  491. if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
  492. return -ENOMEM;
  493. if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
  494. return -ENOMEM;
  495. if (add_uevent_var(env, "ASYNC=%d", fw_priv->nowait))
  496. return -ENOMEM;
  497. return 0;
  498. }
/* uevent callback: only emit variables while a buffer is still attached. */
static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	int err = 0;

	mutex_lock(&fw_lock);
	if (fw_priv->buf)
		err = do_firmware_uevent(fw_priv, env);
	mutex_unlock(&fw_lock);

	return err;
}
  509. static struct class firmware_class = {
  510. .name = "firmware",
  511. .class_attrs = firmware_class_attrs,
  512. .dev_uevent = firmware_uevent,
  513. .dev_release = fw_dev_release,
  514. };
/* sysfs 'loading' read: 1 while user space is writing the image, else 0. */
static ssize_t firmware_loading_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	int loading = 0;

	mutex_lock(&fw_lock);
	if (fw_priv->buf)
		loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
	mutex_unlock(&fw_lock);

	return sprintf(buf, "%d\n", loading);
}
  526. /* Some architectures don't have PAGE_KERNEL_RO */
  527. #ifndef PAGE_KERNEL_RO
  528. #define PAGE_KERNEL_RO PAGE_KERNEL
  529. #endif
/* one pages buffer should be mapped/unmapped only once */
static int fw_map_pages_buf(struct firmware_buf *buf)
{
	/* non-paged buffers (direct loads) need no mapping */
	if (!buf->is_paged_buf)
		return 0;

	/* drop any stale mapping, then remap the pages read-only */
	vunmap(buf->data);
	buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO);
	if (!buf->data)
		return -ENOMEM;
	return 0;
}
/**
 * firmware_loading_store - set value in the 'loading' control file
 * @dev: device pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for loading control value
 * @count: number of bytes in @buf
 *
 * The relevant values are:
 *
 * 1: Start a load, discarding any previous partial load.
 * 0: Conclude the load and hand the data to the driver code.
 * -1: Conclude the load with an error and discard any written data.
 **/
static ssize_t firmware_loading_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	struct firmware_buf *fw_buf;
	int loading = simple_strtol(buf, NULL, 10);
	int i;

	mutex_lock(&fw_lock);
	fw_buf = fw_priv->buf;
	if (!fw_buf)	/* request already aborted or completed */
		goto out;

	switch (loading) {
	case 1:
		/* discarding any previous partial load */
		if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) {
			/* fixed-destination loads keep their region as-is */
			if (fw_buf->dest_addr) {
				set_bit(FW_STATUS_LOADING, &fw_buf->status);
				break;
			}
			for (i = 0; i < fw_buf->nr_pages; i++)
				__free_page(fw_buf->pages[i]);
			vfree(fw_buf->pages);
			fw_buf->pages = NULL;
			fw_buf->page_array_size = 0;
			fw_buf->nr_pages = 0;
			set_bit(FW_STATUS_LOADING, &fw_buf->status);
		}
		break;
	case 0:
		if (test_bit(FW_STATUS_LOADING, &fw_buf->status)) {
			set_bit(FW_STATUS_DONE, &fw_buf->status);
			clear_bit(FW_STATUS_LOADING, &fw_buf->status);
			/*
			 * Several loading requests may be pending on
			 * one same firmware buf, so let all requests
			 * see the mapped 'buf->data' once the loading
			 * is completed.
			 * */
			if (fw_map_pages_buf(fw_buf))
				dev_err(dev, "%s: map pages failed\n",
					__func__);
			list_del_init(&fw_buf->pending_list);
			complete_all(&fw_buf->completion);
			break;
		}
		/* writing 0 while not LOADING is an error: fall through */
		/* fallthrough */
	default:
		dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
		/* fallthrough */
	case -1:
		fw_load_abort(fw_priv);
		break;
	}
out:
	mutex_unlock(&fw_lock);
	return count;
}
  612. static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
/*
 * Copy @count bytes between @buffer and the physical destination window
 * at dest_addr + *offset. @read selects direction (1 = device memory to
 * buffer). Advances *offset on success. Returns the byte count or a
 * negative errno. Caller holds fw_lock.
 */
static int __firmware_data_rw(struct firmware_priv *fw_priv, char *buffer,
			      loff_t *offset, size_t count, int read)
{
	u8 __iomem *fw_buf;
	struct firmware_buf *buf = fw_priv->buf;
	int retval = count;

	/* bounds check against the fixed-size destination region */
	if ((*offset + count) > buf->dest_size) {
		pr_debug("%s: Failed size check.\n", __func__);
		retval = -EINVAL;
		goto out;
	}

	/* map only the window actually being accessed */
	fw_buf = buf->map_fw_mem(buf->dest_addr + *offset, count);
	if (!fw_buf) {
		pr_debug("%s: Failed ioremap.\n", __func__);
		retval = -ENOMEM;
		goto out;
	}

	if (read)
		memcpy(buffer, fw_buf, count);
	else
		memcpy(fw_buf, buffer, count);

	*offset += count;
	buf->unmap_fw_mem(fw_buf);

out:
	return retval;
}
  639. static ssize_t firmware_direct_read(struct file *filp, struct kobject *kobj,
  640. struct bin_attribute *bin_attr,
  641. char *buffer, loff_t offset, size_t count)
  642. {
  643. struct device *dev = kobj_to_dev(kobj);
  644. struct firmware_priv *fw_priv = to_firmware_priv(dev);
  645. struct firmware *fw;
  646. ssize_t ret_count;
  647. if (!fw_priv->fw)
  648. return -ENODEV;
  649. mutex_lock(&fw_lock);
  650. fw = fw_priv->fw;
  651. if (offset > fw->size) {
  652. ret_count = 0;
  653. goto out;
  654. }
  655. if (count > fw->size - offset)
  656. count = fw->size - offset;
  657. if (test_bit(FW_STATUS_DONE, &fw_priv->buf->status)) {
  658. ret_count = -ENODEV;
  659. goto out;
  660. }
  661. ret_count = __firmware_data_rw(fw_priv, buffer, &offset, count, 1);
  662. out:
  663. mutex_unlock(&fw_lock);
  664. return ret_count;
  665. }
/*
 * sysfs write for fixed-destination ("direct") loads: copy user data
 * straight into the physical window. Requires CAP_SYS_RAWIO. Rejected
 * once the firmware is detached or the load concluded.
 */
static ssize_t firmware_direct_write(struct file *filp, struct kobject *kobj,
				     struct bin_attribute *bin_attr,
				     char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	struct firmware *fw;
	ssize_t retval;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	mutex_lock(&fw_lock);
	fw = fw_priv->fw;
	if (!fw || test_bit(FW_STATUS_DONE, &fw_priv->buf->status)) {
		retval = -ENODEV;
		goto out;
	}

	retval = __firmware_data_rw(fw_priv, buffer, &offset, count, 0);
	if (retval < 0)
		goto out;

	/* track the high-water mark as the received image size */
	fw_priv->buf->size = max_t(size_t, offset, fw_priv->buf->size);
out:
	mutex_unlock(&fw_lock);
	return retval;
}
  690. static struct bin_attribute firmware_direct_attr_data = {
  691. .attr = { .name = "data", .mode = 0644 },
  692. .size = 0,
  693. .read = firmware_direct_read,
  694. .write = firmware_direct_write,
  695. };
/*
 * sysfs read of the partially-written image: copy out of the page
 * array one page at a time (pages may be HIGHMEM, hence kmap/kunmap).
 * Returns -ENODEV once the load has concluded or the buffer is gone.
 */
static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
				  struct bin_attribute *bin_attr,
				  char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	struct firmware_buf *buf;
	ssize_t ret_count;

	mutex_lock(&fw_lock);
	buf = fw_priv->buf;
	if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
		ret_count = -ENODEV;
		goto out;
	}
	if (offset > buf->size) {
		ret_count = 0;
		goto out;
	}
	/* clamp to what has actually been written so far */
	if (count > buf->size - offset)
		count = buf->size - offset;

	ret_count = count;

	while (count) {
		void *page_data;
		int page_nr = offset >> PAGE_SHIFT;
		int page_ofs = offset & (PAGE_SIZE-1);
		int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);

		page_data = kmap(buf->pages[page_nr]);

		memcpy(buffer, page_data + page_ofs, page_cnt);

		kunmap(buf->pages[page_nr]);
		buffer += page_cnt;
		offset += page_cnt;
		count -= page_cnt;
	}
out:
	mutex_unlock(&fw_lock);
	return ret_count;
}
/*
 * Grow the page array and backing pages so the buffer can hold at
 * least @min_size bytes. Aborts the whole load and returns -ENOMEM on
 * failure. Caller holds fw_lock.
 *
 * NOTE(review): @min_size is int while callers pass offset + count --
 * presumably sysfs writes keep this well below INT_MAX; confirm no
 * truncation is possible at the call sites.
 */
static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
{
	struct firmware_buf *buf = fw_priv->buf;
	int pages_needed = ALIGN(min_size, PAGE_SIZE) >> PAGE_SHIFT;

	/* If the array of pages is too small, grow it... */
	if (buf->page_array_size < pages_needed) {
		int new_array_size = max(pages_needed,
					 buf->page_array_size * 2);
		struct page **new_pages;

		new_pages = vmalloc(new_array_size * sizeof(void *));
		if (!new_pages) {
			fw_load_abort(fw_priv);
			return -ENOMEM;
		}
		memcpy(new_pages, buf->pages,
		       buf->page_array_size * sizeof(void *));
		memset(&new_pages[buf->page_array_size], 0, sizeof(void *) *
		       (new_array_size - buf->page_array_size));
		vfree(buf->pages);
		buf->pages = new_pages;
		buf->page_array_size = new_array_size;
	}
	/* ...then allocate any pages still missing */
	while (buf->nr_pages < pages_needed) {
		buf->pages[buf->nr_pages] =
			alloc_page(GFP_KERNEL | __GFP_HIGHMEM);

		if (!buf->pages[buf->nr_pages]) {
			fw_load_abort(fw_priv);
			return -ENOMEM;
		}
		buf->nr_pages++;
	}
	return 0;
}
/**
 * firmware_data_write - write method for firmware
 * @filp: open sysfs file
 * @kobj: kobject for the device
 * @bin_attr: bin_attr structure
 * @buffer: buffer being written
 * @offset: buffer offset for write in total data store area
 * @count: buffer size
 *
 * Data written to the 'data' attribute will be later handed to
 * the driver as a firmware image.
 **/
static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *bin_attr,
				   char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	struct firmware_buf *buf;
	ssize_t retval;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	mutex_lock(&fw_lock);
	buf = fw_priv->buf;
	if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
		retval = -ENODEV;
		goto out;
	}

	/* make sure pages exist up to offset + count before copying in */
	retval = fw_realloc_buffer(fw_priv, offset + count);
	if (retval)
		goto out;

	retval = count;

	/* copy page by page; pages may be HIGHMEM, hence kmap/kunmap */
	while (count) {
		void *page_data;
		int page_nr = offset >> PAGE_SHIFT;
		int page_ofs = offset & (PAGE_SIZE - 1);
		int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);

		page_data = kmap(buf->pages[page_nr]);

		memcpy(page_data + page_ofs, buffer, page_cnt);

		kunmap(buf->pages[page_nr]);
		buffer += page_cnt;
		offset += page_cnt;
		count -= page_cnt;
	}

	/* track the high-water mark as the received image size */
	buf->size = max_t(size_t, offset, buf->size);
out:
	mutex_unlock(&fw_lock);
	return retval;
}
/*
 * sysfs "data" binary attribute for the paged (non-direct) load path;
 * userspace reads/writes the firmware image through these callbacks.
 */
static struct bin_attribute firmware_attr_data = {
	.attr = { .name = "data", .mode = 0644 },
	.size = 0,
	.read = firmware_data_read,
	.write = firmware_data_write,
};
  821. static void firmware_class_timeout_work(struct work_struct *work)
  822. {
  823. struct firmware_priv *fw_priv = container_of(work,
  824. struct firmware_priv, timeout_work.work);
  825. mutex_lock(&fw_lock);
  826. fw_load_abort(fw_priv);
  827. mutex_unlock(&fw_lock);
  828. }
  829. static struct firmware_priv *
  830. fw_create_instance(struct firmware *firmware, struct fw_desc *desc)
  831. {
  832. struct firmware_priv *fw_priv;
  833. struct device *f_dev;
  834. fw_priv = kzalloc(sizeof(*fw_priv), GFP_KERNEL);
  835. if (!fw_priv) {
  836. dev_err(desc->device, "%s: kmalloc failed\n", __func__);
  837. fw_priv = ERR_PTR(-ENOMEM);
  838. goto exit;
  839. }
  840. fw_priv->nowait = desc->nowait;
  841. fw_priv->fw = firmware;
  842. INIT_DELAYED_WORK(&fw_priv->timeout_work,
  843. firmware_class_timeout_work);
  844. f_dev = &fw_priv->dev;
  845. device_initialize(f_dev);
  846. dev_set_name(f_dev, "%s", desc->name);
  847. f_dev->parent = desc->device;
  848. f_dev->class = &firmware_class;
  849. exit:
  850. return fw_priv;
  851. }
  852. /* load a firmware via user helper */
  853. static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
  854. long timeout)
  855. {
  856. int retval = 0;
  857. struct device *f_dev = &fw_priv->dev;
  858. struct firmware_buf *buf = fw_priv->buf;
  859. struct bin_attribute *fw_attr_data = buf->dest_addr ?
  860. &firmware_direct_attr_data : &firmware_attr_data;
  861. /* fall back on userspace loading */
  862. buf->is_paged_buf = buf->dest_addr ? false : true;
  863. dev_set_uevent_suppress(f_dev, true);
  864. /* Need to pin this module until class device is destroyed */
  865. __module_get(THIS_MODULE);
  866. retval = device_add(f_dev);
  867. if (retval) {
  868. dev_err(f_dev, "%s: device_register failed\n", __func__);
  869. goto err_put_dev;
  870. }
  871. retval = device_create_bin_file(f_dev, fw_attr_data);
  872. if (retval) {
  873. dev_err(f_dev, "%s: sysfs_create_bin_file failed\n", __func__);
  874. goto err_del_dev;
  875. }
  876. mutex_lock(&fw_lock);
  877. list_add(&buf->pending_list, &pending_fw_head);
  878. mutex_unlock(&fw_lock);
  879. retval = device_create_file(f_dev, &dev_attr_loading);
  880. if (retval) {
  881. mutex_lock(&fw_lock);
  882. list_del_init(&buf->pending_list);
  883. mutex_unlock(&fw_lock);
  884. dev_err(f_dev, "%s: device_create_file failed\n", __func__);
  885. goto err_del_bin_attr;
  886. }
  887. if (uevent) {
  888. dev_set_uevent_suppress(f_dev, false);
  889. dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);
  890. if (timeout != MAX_SCHEDULE_TIMEOUT)
  891. schedule_delayed_work(&fw_priv->timeout_work, timeout);
  892. kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
  893. }
  894. retval = wait_for_completion_interruptible(&buf->completion);
  895. cancel_delayed_work_sync(&fw_priv->timeout_work);
  896. if (is_fw_load_aborted(buf))
  897. retval = -EAGAIN;
  898. else if (!buf->data)
  899. retval = -ENOMEM;
  900. if (!buf->data && buf->is_paged_buf)
  901. retval = -ENOMEM;
  902. device_remove_file(f_dev, &dev_attr_loading);
  903. err_del_bin_attr:
  904. device_remove_bin_file(f_dev, &firmware_attr_data);
  905. err_del_dev:
  906. device_del(f_dev);
  907. err_put_dev:
  908. put_device(f_dev);
  909. return retval;
  910. }
  911. static int fw_load_from_user_helper(struct firmware *firmware,
  912. struct fw_desc *desc, long timeout)
  913. {
  914. struct firmware_priv *fw_priv;
  915. fw_priv = fw_create_instance(firmware, desc);
  916. if (IS_ERR(fw_priv))
  917. return PTR_ERR(fw_priv);
  918. fw_priv->buf = firmware->priv;
  919. return _request_firmware_load(fw_priv, desc->uevent, timeout);
  920. }
  921. #else /* CONFIG_FW_LOADER_USER_HELPER */
  922. static inline int
  923. fw_load_from_user_helper(struct firmware *firmware, const char *name,
  924. struct device *device, bool uevent, bool nowait,
  925. long timeout)
  926. {
  927. return -ENOENT;
  928. }
  929. /* No abort during direct loading */
  930. #define is_fw_load_aborted(buf) false
  931. #endif /* CONFIG_FW_LOADER_USER_HELPER */
/* wait until the shared firmware_buf becomes ready (or error) */
static int sync_cached_firmware_buf(struct firmware_buf *buf)
{
	int ret = 0;

	mutex_lock(&fw_lock);
	/* FW_STATUS_DONE is set by whichever requester completes the load */
	while (!test_bit(FW_STATUS_DONE, &buf->status)) {
		if (is_fw_load_aborted(buf)) {
			ret = -ENOENT;
			break;
		}
		/*
		 * Drop fw_lock while sleeping so the loading path can make
		 * progress; status is re-checked after re-acquiring it.
		 */
		mutex_unlock(&fw_lock);
		ret = wait_for_completion_interruptible(&buf->completion);
		mutex_lock(&fw_lock);
	}
	mutex_unlock(&fw_lock);
	return ret;
}
/* prepare firmware and firmware_buf structs;
 * return 0 if a firmware is already assigned, 1 if need to load one,
 * or a negative error code
 */
static int
_request_firmware_prepare(struct firmware **firmware_p, struct fw_desc *desc)
{
	struct firmware *firmware;
	struct firmware_buf *buf;
	int ret;

	*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
	if (!firmware) {
		dev_err(desc->device, "%s: kmalloc(struct firmware) failed\n",
			__func__);
		return -ENOMEM;
	}

	/* built-in images need no loading at all */
	if (fw_get_builtin_firmware(firmware, desc->name)) {
		dev_dbg(desc->device, "firmware: using built-in firmware %s\n",
			desc->name);
		return 0; /* assigned */
	}

	/*
	 * nocache (direct) requests get a private buf carrying the
	 * destination address and mapping callbacks; it is never entered
	 * into the shared fw_cache.
	 */
	if (desc->nocache) {
		buf = __allocate_fw_buf(desc->name, NULL);
		if (!buf)
			return -ENOMEM;
		buf->dest_addr = desc->dest_addr;
		buf->dest_size = desc->dest_size;
		buf->map_fw_mem = desc->map_fw_mem;
		buf->unmap_fw_mem = desc->unmap_fw_mem;
		firmware->priv = buf;
		return 1;
	}

	/* ret > 0: an existing (in-flight or completed) buf was found */
	ret = fw_lookup_and_allocate_buf(desc->name, &fw_cache, &buf);

	/*
	 * bind with 'buf' now to avoid warning in failure path
	 * of requesting firmware.
	 */
	firmware->priv = buf;

	if (ret > 0) {
		/* piggyback: wait for the other requester to finish */
		ret = sync_cached_firmware_buf(buf);
		if (!ret) {
			fw_set_page_data(buf, firmware);
			return 0; /* assigned */
		}
	}

	if (ret < 0)
		return ret;
	return 1; /* need to load */
}
/*
 * Publish a completed buf to the caller's struct firmware, and hook the
 * image into the devres-based caching machinery unless nocache was set.
 */
static int assign_firmware_buf(struct firmware *fw, struct device *device,
			       bool nocache)
{
	struct firmware_buf *buf = fw->priv;

	mutex_lock(&fw_lock);
	/* an empty or aborted load never reaches the caller */
	if (!buf->size || is_fw_load_aborted(buf)) {
		mutex_unlock(&fw_lock);
		return -ENOENT;
	}

	/*
	 * add firmware name into devres list so that we can auto cache
	 * and uncache firmware for device.
	 *
	 * device may have been deleted already, but the problem
	 * should be fixed in devres or driver core.
	 */
	if (device && !nocache)
		fw_add_devm_name(device, buf->fw_id);

	/*
	 * After caching firmware image is started, let it piggyback
	 * on request firmware.
	 */
	if (!nocache && (buf->fwc->state == FW_LOADER_START_CACHE)) {
		if (fw_cache_piggyback_on_request(buf->fw_id))
			kref_get(&buf->ref);
	}

	/* pass the pages buffer to driver at the last minute */
	fw_set_page_data(buf, fw);
	mutex_unlock(&fw_lock);
	return 0;
}
/* called from request_firmware() and request_firmware_work_func() */
static int _request_firmware(struct fw_desc *desc)
{
	struct firmware *fw;
	long timeout;
	int ret;

	if (!desc->firmware_p)
		return -EINVAL;

	ret = _request_firmware_prepare(&fw, desc);
	if (ret <= 0) /* error or already assigned */
		goto out;

	ret = 0;
	timeout = firmware_loading_timeout();
	if (desc->nowait) {
		/*
		 * Async path may sleep for the usermodehelper to become
		 * available; 0 means it never did within the timeout, and
		 * the read lock was NOT taken (so no unlock on this path).
		 */
		timeout = usermodehelper_read_lock_wait(timeout);
		if (!timeout) {
			dev_dbg(desc->device, "firmware: %s loading timed out\n",
				desc->name);
			ret = -EBUSY;
			goto out;
		}
	} else {
		/* sync path must not block (e.g. during suspend/resume) */
		ret = usermodehelper_read_trylock();
		if (WARN_ON(ret)) {
			dev_err(desc->device, "firmware: %s will not be loaded\n",
				desc->name);
			goto out;
		}
	}

	/* try a direct filesystem load first, else fall back to the helper */
	if (!fw_get_filesystem_firmware(desc->device, fw->priv,
					desc->dest_addr, desc->dest_size))
		ret = fw_load_from_user_helper(fw, desc, timeout);

	if (!ret)
		ret = assign_firmware_buf(fw, desc->device, desc->nocache);

	usermodehelper_read_unlock();

out:
	if (ret < 0) {
		release_firmware(fw);
		fw = NULL;
	}

	*desc->firmware_p = fw;
	return ret;
}
  1072. /**
  1073. * request_firmware: - send firmware request and wait for it
  1074. * @firmware_p: pointer to firmware image
  1075. * @name: name of firmware file
  1076. * @device: device for which firmware is being loaded
  1077. *
  1078. * @firmware_p will be used to return a firmware image by the name
  1079. * of @name for device @device.
  1080. *
  1081. * Should be called from user context where sleeping is allowed.
  1082. *
  1083. * @name will be used as $FIRMWARE in the uevent environment and
  1084. * should be distinctive enough not to be confused with any other
  1085. * firmware image for this or any other device.
  1086. *
  1087. * Caller must hold the reference count of @device.
  1088. *
  1089. * The function can be called safely inside device's suspend and
  1090. * resume callback.
  1091. **/
  1092. int
  1093. request_firmware(const struct firmware **firmware_p, const char *name,
  1094. struct device *device)
  1095. {
  1096. struct fw_desc desc;
  1097. desc.firmware_p = firmware_p;
  1098. desc.name = name;
  1099. desc.device = device;
  1100. desc.uevent = true;
  1101. desc.nowait = false;
  1102. desc.nocache = false;
  1103. desc.dest_addr = 0;
  1104. desc.dest_size = 0;
  1105. return _request_firmware(&desc);
  1106. }
  1107. /**
  1108. * request_firmware_direct: - send firmware request and wait for it
  1109. * @dest_addr: Destination address for the firmware
  1110. * @dest_size: Size of destination buffer
  1111. *
  1112. * Similar to request_firmware, except takes in a buffer address and
  1113. * copies firmware data directly to that buffer. Returns the size of
  1114. * the firmware that was loaded at dest_addr. This API prevents the
  1115. * caching of images.
  1116. */
  1117. int
  1118. request_firmware_direct(const char *name, struct device *device,
  1119. phys_addr_t dest_addr, size_t dest_size,
  1120. void * (*map_fw_mem)(phys_addr_t phys, size_t size),
  1121. void (*unmap_fw_mem)(void *virt))
  1122. {
  1123. struct fw_desc desc;
  1124. const struct firmware *fp = NULL;
  1125. int ret;
  1126. if (dest_addr && !map_fw_mem)
  1127. return -EINVAL;
  1128. if (dest_addr && dest_size <= 0)
  1129. return -EINVAL;
  1130. desc.firmware_p = &fp;
  1131. desc.name = name;
  1132. desc.device = device;
  1133. desc.uevent = true;
  1134. desc.nowait = false;
  1135. desc.nocache = true;
  1136. desc.dest_addr = dest_addr;
  1137. desc.dest_size = dest_size;
  1138. desc.map_fw_mem = map_fw_mem;
  1139. desc.unmap_fw_mem = unmap_fw_mem;
  1140. ret = _request_firmware(&desc);
  1141. if (ret)
  1142. return ret;
  1143. ret = fp->size;
  1144. release_firmware(fp);
  1145. return ret;
  1146. }
  1147. /**
  1148. * release_firmware: - release the resource associated with a firmware image
  1149. * @fw: firmware resource to release
  1150. **/
  1151. void release_firmware(const struct firmware *fw)
  1152. {
  1153. if (fw) {
  1154. if (!fw_is_builtin_firmware(fw))
  1155. firmware_free_data(fw);
  1156. kfree(fw);
  1157. }
  1158. }
  1159. /* Async support */
  1160. static void request_firmware_work_func(struct work_struct *work)
  1161. {
  1162. const struct firmware *fw;
  1163. struct fw_desc *desc;
  1164. desc = container_of(work, struct fw_desc, work);
  1165. desc->firmware_p = &fw;
  1166. _request_firmware(desc);
  1167. desc->cont(fw, desc->context);
  1168. put_device(desc->device); /* taken in request_firmware_nowait() */
  1169. module_put(desc->module);
  1170. kfree(desc);
  1171. }
  1172. int
  1173. _request_firmware_nowait(
  1174. struct module *module, bool uevent,
  1175. const char *name, struct device *device, gfp_t gfp, void *context,
  1176. void (*cont)(const struct firmware *fw, void *context),
  1177. bool nocache, phys_addr_t dest_addr, size_t dest_size,
  1178. void * (*map_fw_mem)(phys_addr_t phys, size_t size),
  1179. void (*unmap_fw_mem)(void *virt))
  1180. {
  1181. struct fw_desc *desc;
  1182. if (dest_addr && !map_fw_mem)
  1183. return -EINVAL;
  1184. if (dest_addr && dest_size <= 0)
  1185. return -EINVAL;
  1186. desc = kzalloc(sizeof(struct fw_desc), gfp);
  1187. if (!desc)
  1188. return -ENOMEM;
  1189. desc->module = module;
  1190. desc->name = name;
  1191. desc->device = device;
  1192. desc->context = context;
  1193. desc->cont = cont;
  1194. desc->uevent = uevent;
  1195. desc->nocache = nocache;
  1196. desc->dest_addr = dest_addr;
  1197. desc->dest_size = dest_size;
  1198. desc->map_fw_mem = map_fw_mem;
  1199. desc->unmap_fw_mem = unmap_fw_mem;
  1200. if (!try_module_get(module)) {
  1201. kfree(desc);
  1202. return -EFAULT;
  1203. }
  1204. get_device(desc->device);
  1205. INIT_WORK(&desc->work, request_firmware_work_func);
  1206. schedule_work(&desc->work);
  1207. return 0;
  1208. }
  1209. /**
  1210. * request_firmware_nowait - asynchronous version of request_firmware
  1211. * @module: module requesting the firmware
  1212. * @uevent: sends uevent to copy the firmware image if this flag
  1213. * is non-zero else the firmware copy must be done manually.
  1214. * @name: name of firmware file
  1215. * @device: device for which firmware is being loaded
  1216. * @gfp: allocation flags
  1217. * @context: will be passed over to @cont, and
  1218. * @fw may be %NULL if firmware request fails.
  1219. * @cont: function will be called asynchronously when the firmware
  1220. * request is over.
  1221. *
  1222. * Caller must hold the reference count of @device.
  1223. *
  1224. * Asynchronous variant of request_firmware() for user contexts:
  1225. * - sleep for as small periods as possible since it may
  1226. * increase kernel boot time of built-in device drivers
  1227. * requesting firmware in their ->probe() methods, if
  1228. * @gfp is GFP_KERNEL.
  1229. *
  1230. * - can't sleep at all if @gfp is GFP_ATOMIC.
  1231. **/
  1232. int
  1233. request_firmware_nowait(
  1234. struct module *module, bool uevent,
  1235. const char *name, struct device *device, gfp_t gfp, void *context,
  1236. void (*cont)(const struct firmware *fw, void *context))
  1237. {
  1238. return _request_firmware_nowait(module, uevent, name, device, gfp,
  1239. context, cont, false, 0, 0, NULL, NULL);
  1240. }
  1241. /**
  1242. * request_firmware_nowait_direct - asynchronous version of request_firmware
  1243. * @dest_addr: Destination address for the firmware
  1244. * @dest_size: Size of destination buffer
  1245. *
  1246. * Similar to request_firmware_nowait, except loads the firmware
  1247. * directly to a destination address without using an intermediate
  1248. * buffer.
  1249. *
  1250. **/
  1251. int
  1252. request_firmware_nowait_direct(
  1253. struct module *module, bool uevent,
  1254. const char *name, struct device *device, gfp_t gfp, void *context,
  1255. void (*cont)(const struct firmware *fw, void *context),
  1256. phys_addr_t dest_addr, size_t dest_size,
  1257. void * (*map_fw_mem)(phys_addr_t phys, size_t size),
  1258. void (*unmap_fw_mem)(void *virt))
  1259. {
  1260. return _request_firmware_nowait(module, uevent, name, device, gfp,
  1261. context, cont, true, dest_addr,
  1262. dest_size, map_fw_mem, unmap_fw_mem);
  1263. }
  1264. /**
  1265. * cache_firmware - cache one firmware image in kernel memory space
  1266. * @fw_name: the firmware image name
  1267. *
  1268. * Cache firmware in kernel memory so that drivers can use it when
  1269. * system isn't ready for them to request firmware image from userspace.
  1270. * Once it returns successfully, driver can use request_firmware or its
  1271. * nowait version to get the cached firmware without any interacting
  1272. * with userspace
  1273. *
  1274. * Return 0 if the firmware image has been cached successfully
  1275. * Return !0 otherwise
  1276. *
  1277. */
  1278. int cache_firmware(const char *fw_name)
  1279. {
  1280. int ret;
  1281. const struct firmware *fw;
  1282. pr_debug("%s: %s\n", __func__, fw_name);
  1283. ret = request_firmware(&fw, fw_name, NULL);
  1284. if (!ret)
  1285. kfree(fw);
  1286. pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret);
  1287. return ret;
  1288. }
  1289. /**
  1290. * uncache_firmware - remove one cached firmware image
  1291. * @fw_name: the firmware image name
  1292. *
  1293. * Uncache one firmware image which has been cached successfully
  1294. * before.
  1295. *
  1296. * Return 0 if the firmware cache has been removed successfully
  1297. * Return !0 otherwise
  1298. *
  1299. */
  1300. int uncache_firmware(const char *fw_name)
  1301. {
  1302. struct firmware_buf *buf;
  1303. struct firmware fw;
  1304. pr_debug("%s: %s\n", __func__, fw_name);
  1305. if (fw_get_builtin_firmware(&fw, fw_name))
  1306. return 0;
  1307. buf = fw_lookup_buf(fw_name);
  1308. if (buf) {
  1309. fw_free_buf(buf);
  1310. return 0;
  1311. }
  1312. return -EINVAL;
  1313. }
  1314. #ifdef CONFIG_FW_CACHE
  1315. static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
  1316. static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
  1317. {
  1318. struct fw_cache_entry *fce;
  1319. fce = kzalloc(sizeof(*fce) + strlen(name) + 1, GFP_ATOMIC);
  1320. if (!fce)
  1321. goto exit;
  1322. strcpy(fce->name, name);
  1323. exit:
  1324. return fce;
  1325. }
  1326. static int __fw_entry_found(const char *name)
  1327. {
  1328. struct firmware_cache *fwc = &fw_cache;
  1329. struct fw_cache_entry *fce;
  1330. list_for_each_entry(fce, &fwc->fw_names, list) {
  1331. if (!strcmp(fce->name, name))
  1332. return 1;
  1333. }
  1334. return 0;
  1335. }
  1336. static int fw_cache_piggyback_on_request(const char *name)
  1337. {
  1338. struct firmware_cache *fwc = &fw_cache;
  1339. struct fw_cache_entry *fce;
  1340. int ret = 0;
  1341. spin_lock(&fwc->name_lock);
  1342. if (__fw_entry_found(name))
  1343. goto found;
  1344. fce = alloc_fw_cache_entry(name);
  1345. if (fce) {
  1346. ret = 1;
  1347. list_add(&fce->list, &fwc->fw_names);
  1348. pr_debug("%s: fw: %s\n", __func__, name);
  1349. }
  1350. found:
  1351. spin_unlock(&fwc->name_lock);
  1352. return ret;
  1353. }
/* Release an entry obtained from alloc_fw_cache_entry(). */
static void free_fw_cache_entry(struct fw_cache_entry *fce)
{
	kfree(fce);
}
  1358. static void __async_dev_cache_fw_image(void *fw_entry,
  1359. async_cookie_t cookie)
  1360. {
  1361. struct fw_cache_entry *fce = fw_entry;
  1362. struct firmware_cache *fwc = &fw_cache;
  1363. int ret;
  1364. ret = cache_firmware(fce->name);
  1365. if (ret) {
  1366. spin_lock(&fwc->name_lock);
  1367. list_del(&fce->list);
  1368. spin_unlock(&fwc->name_lock);
  1369. free_fw_cache_entry(fce);
  1370. }
  1371. }
  1372. /* called with dev->devres_lock held */
  1373. static void dev_create_fw_entry(struct device *dev, void *res,
  1374. void *data)
  1375. {
  1376. struct fw_name_devm *fwn = res;
  1377. const char *fw_name = fwn->name;
  1378. struct list_head *head = data;
  1379. struct fw_cache_entry *fce;
  1380. fce = alloc_fw_cache_entry(fw_name);
  1381. if (fce)
  1382. list_add(&fce->list, head);
  1383. }
  1384. static int devm_name_match(struct device *dev, void *res,
  1385. void *match_data)
  1386. {
  1387. struct fw_name_devm *fwn = res;
  1388. return (fwn->magic == (unsigned long)match_data);
  1389. }
/*
 * Collect every firmware name recorded on @dev via devres and schedule an
 * async caching job for each one not already queued.
 */
static void dev_cache_fw_image(struct device *dev, void *data)
{
	LIST_HEAD(todo);
	struct fw_cache_entry *fce;
	struct fw_cache_entry *fce_next;
	struct firmware_cache *fwc = &fw_cache;

	/* snapshot the device's fw names into a private todo list first */
	devres_for_each_res(dev, fw_name_devm_release,
			    devm_name_match, &fw_cache,
			    dev_create_fw_entry, &todo);

	list_for_each_entry_safe(fce, fce_next, &todo, list) {
		list_del(&fce->list);

		spin_lock(&fwc->name_lock);
		/* only one cache entry for one firmware */
		if (!__fw_entry_found(fce->name)) {
			list_add(&fce->list, &fwc->fw_names);
		} else {
			/* duplicate: drop it and mark via fce = NULL so the
			 * scheduling step below is skipped */
			free_fw_cache_entry(fce);
			fce = NULL;
		}
		spin_unlock(&fwc->name_lock);

		if (fce)
			async_schedule_domain(__async_dev_cache_fw_image,
					      (void *)fce,
					      &fw_cache_domain);
	}
}
/* Drop every cached image queued on fw_cache.fw_names. */
static void __device_uncache_fw_images(void)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;

	spin_lock(&fwc->name_lock);
	while (!list_empty(&fwc->fw_names)) {
		fce = list_entry(fwc->fw_names.next,
				 struct fw_cache_entry, list);
		list_del(&fce->list);
		/*
		 * uncache_firmware() may sleep (mutexes), so the spinlock is
		 * dropped around it; the entry is already unlinked so no one
		 * else can reach it.
		 */
		spin_unlock(&fwc->name_lock);

		uncache_firmware(fce->name);
		free_fw_cache_entry(fce);

		spin_lock(&fwc->name_lock);
	}
	spin_unlock(&fwc->name_lock);
}
  1432. /**
  1433. * device_cache_fw_images - cache devices' firmware
  1434. *
  1435. * If one device called request_firmware or its nowait version
  1436. * successfully before, the firmware names are recored into the
  1437. * device's devres link list, so device_cache_fw_images can call
  1438. * cache_firmware() to cache these firmwares for the device,
  1439. * then the device driver can load its firmwares easily at
  1440. * time when system is not ready to complete loading firmware.
  1441. */
  1442. static void device_cache_fw_images(void)
  1443. {
  1444. struct firmware_cache *fwc = &fw_cache;
  1445. int old_timeout;
  1446. DEFINE_WAIT(wait);
  1447. pr_debug("%s\n", __func__);
  1448. /* cancel uncache work */
  1449. cancel_delayed_work_sync(&fwc->work);
  1450. /*
  1451. * use small loading timeout for caching devices' firmware
  1452. * because all these firmware images have been loaded
  1453. * successfully at lease once, also system is ready for
  1454. * completing firmware loading now. The maximum size of
  1455. * firmware in current distributions is about 2M bytes,
  1456. * so 10 secs should be enough.
  1457. */
  1458. old_timeout = loading_timeout;
  1459. loading_timeout = 10;
  1460. mutex_lock(&fw_lock);
  1461. fwc->state = FW_LOADER_START_CACHE;
  1462. dpm_for_each_dev(NULL, dev_cache_fw_image);
  1463. mutex_unlock(&fw_lock);
  1464. /* wait for completion of caching firmware for all devices */
  1465. async_synchronize_full_domain(&fw_cache_domain);
  1466. loading_timeout = old_timeout;
  1467. }
  1468. /**
  1469. * device_uncache_fw_images - uncache devices' firmware
  1470. *
  1471. * uncache all firmwares which have been cached successfully
  1472. * by device_uncache_fw_images earlier
  1473. */
/* Uncache everything previously cached by device_cache_fw_images(). */
static void device_uncache_fw_images(void)
{
	pr_debug("%s\n", __func__);
	__device_uncache_fw_images();
}
/* Delayed-work shim for device_uncache_fw_images(). */
static void device_uncache_fw_images_work(struct work_struct *work)
{
	device_uncache_fw_images();
}
  1483. /**
  1484. * device_uncache_fw_images_delay - uncache devices firmwares
  1485. * @delay: number of milliseconds to delay uncache device firmwares
  1486. *
  1487. * uncache all devices's firmwares which has been cached successfully
  1488. * by device_cache_fw_images after @delay milliseconds.
  1489. */
/* Schedule the uncache work to run after @delay milliseconds. */
static void device_uncache_fw_images_delay(unsigned long delay)
{
	schedule_delayed_work(&fw_cache.work,
			      msecs_to_jiffies(delay));
}
  1495. static int fw_pm_notify(struct notifier_block *notify_block,
  1496. unsigned long mode, void *unused)
  1497. {
  1498. switch (mode) {
  1499. case PM_HIBERNATION_PREPARE:
  1500. case PM_SUSPEND_PREPARE:
  1501. device_cache_fw_images();
  1502. break;
  1503. case PM_POST_SUSPEND:
  1504. case PM_POST_HIBERNATION:
  1505. case PM_POST_RESTORE:
  1506. /*
  1507. * In case that system sleep failed and syscore_suspend is
  1508. * not called.
  1509. */
  1510. mutex_lock(&fw_lock);
  1511. fw_cache.state = FW_LOADER_NO_CACHE;
  1512. mutex_unlock(&fw_lock);
  1513. device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
  1514. break;
  1515. }
  1516. return 0;
  1517. }
/* stop caching firmware once syscore_suspend is reached */
static int fw_suspend(void)
{
	fw_cache.state = FW_LOADER_NO_CACHE;
	return 0;
}
/* syscore hook: runs with one CPU and interrupts disabled, late in suspend */
static struct syscore_ops fw_syscore_ops = {
	.suspend = fw_suspend,
};
  1527. #else
/* Without CONFIG_FW_CACHE there is never a cache to piggyback on. */
static int fw_cache_piggyback_on_request(const char *name)
{
	return 0;
}
  1532. #endif
/* One-time initialisation of the global firmware cache state. */
static void __init fw_cache_init(void)
{
	spin_lock_init(&fw_cache.lock);
	INIT_LIST_HEAD(&fw_cache.head);
	fw_cache.state = FW_LOADER_NO_CACHE;

#ifdef CONFIG_FW_CACHE
	spin_lock_init(&fw_cache.name_lock);
	INIT_LIST_HEAD(&fw_cache.fw_names);

	INIT_DELAYED_WORK(&fw_cache.work,
			  device_uncache_fw_images_work);

	/* cache firmware around suspend/hibernate transitions */
	fw_cache.pm_notify.notifier_call = fw_pm_notify;
	register_pm_notifier(&fw_cache.pm_notify);

	register_syscore_ops(&fw_syscore_ops);
#endif
}
/* Module init: set up the cache and (if configured) the user-helper class. */
static int __init firmware_class_init(void)
{
	fw_cache_init();
#ifdef CONFIG_FW_LOADER_USER_HELPER
	/* abort in-flight user-helper loads cleanly on reboot */
	register_reboot_notifier(&fw_shutdown_nb);
	return class_register(&firmware_class);
#else
	return 0;
#endif
}
/* Module exit: undo firmware_class_init() registrations in reverse order. */
static void __exit firmware_class_exit(void)
{
#ifdef CONFIG_FW_CACHE
	unregister_syscore_ops(&fw_syscore_ops);
	unregister_pm_notifier(&fw_cache.pm_notify);
#endif
#ifdef CONFIG_FW_LOADER_USER_HELPER
	unregister_reboot_notifier(&fw_shutdown_nb);
	class_unregister(&firmware_class);
#endif
}
/* fs_initcall: early enough for built-in drivers requesting firmware */
fs_initcall(firmware_class_init);
module_exit(firmware_class_exit);

EXPORT_SYMBOL(release_firmware);
EXPORT_SYMBOL(request_firmware);
EXPORT_SYMBOL(request_firmware_nowait);
EXPORT_SYMBOL_GPL(cache_firmware);
EXPORT_SYMBOL_GPL(uncache_firmware);