ec.c 53 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986
  1. /*
  2. * ec.c - ACPI Embedded Controller Driver (v3)
  3. *
  4. * Copyright (C) 2001-2015 Intel Corporation
  5. * Author: 2014, 2015 Lv Zheng <lv.zheng@intel.com>
  6. * 2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
  7. * 2006 Denis Sadykov <denis.m.sadykov@intel.com>
  8. * 2004 Luming Yu <luming.yu@intel.com>
  9. * 2001, 2002 Andy Grover <andrew.grover@intel.com>
  10. * 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  11. * Copyright (C) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
  12. *
  13. * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  14. *
  15. * This program is free software; you can redistribute it and/or modify
  16. * it under the terms of the GNU General Public License as published by
  17. * the Free Software Foundation; either version 2 of the License, or (at
  18. * your option) any later version.
  19. *
  20. * This program is distributed in the hope that it will be useful, but
  21. * WITHOUT ANY WARRANTY; without even the implied warranty of
  22. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  23. * General Public License for more details.
  24. *
  25. * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  26. */
  27. /* Uncomment next line to get verbose printout */
  28. /* #define DEBUG */
  29. #define pr_fmt(fmt) "ACPI : EC: " fmt
  30. #include <linux/kernel.h>
  31. #include <linux/module.h>
  32. #include <linux/init.h>
  33. #include <linux/types.h>
  34. #include <linux/delay.h>
  35. #include <linux/interrupt.h>
  36. #include <linux/list.h>
  37. #include <linux/spinlock.h>
  38. #include <linux/slab.h>
  39. #include <linux/acpi.h>
  40. #include <linux/dmi.h>
  41. #include <asm/io.h>
  42. #include "internal.h"
  43. #define ACPI_EC_CLASS "embedded_controller"
  44. #define ACPI_EC_DEVICE_NAME "Embedded Controller"
  45. #define ACPI_EC_FILE_INFO "info"
  46. /* EC status register */
  47. #define ACPI_EC_FLAG_OBF 0x01 /* Output buffer full */
  48. #define ACPI_EC_FLAG_IBF 0x02 /* Input buffer full */
  49. #define ACPI_EC_FLAG_CMD 0x08 /* Input buffer contains a command */
  50. #define ACPI_EC_FLAG_BURST 0x10 /* burst mode */
  51. #define ACPI_EC_FLAG_SCI 0x20 /* EC-SCI occurred */
  52. /*
  53. * The SCI_EVT clearing timing is not defined by the ACPI specification.
  54. * This leads to lots of practical timing issues for the host EC driver.
  55. * The following variations are defined (from the target EC firmware's
  56. * perspective):
  57. * STATUS: After indicating SCI_EVT edge triggered IRQ to the host, the
  58. * target can clear SCI_EVT at any time so long as the host can see
  59. * the indication by reading the status register (EC_SC). So the
  60. * host should re-check SCI_EVT after the first time the SCI_EVT
  61. * indication is seen, which is the same time the query request
  62. * (QR_EC) is written to the command register (EC_CMD). SCI_EVT set
  63. * at any later time could indicate another event. Normally such
  64. * kind of EC firmware has implemented an event queue and will
  65. * return 0x00 to indicate "no outstanding event".
  66. * QUERY: After seeing the query request (QR_EC) written to the command
  67. * register (EC_CMD) by the host and having prepared the responding
  68. * event value in the data register (EC_DATA), the target can safely
  69. * clear SCI_EVT because the target can confirm that the current
  70. * event is being handled by the host. The host then should check
  71. * SCI_EVT right after reading the event response from the data
  72. * register (EC_DATA).
  73. * EVENT: After seeing the event response read from the data register
  74. * (EC_DATA) by the host, the target can clear SCI_EVT. As the
  75. * target requires time to notice the change in the data register
  76. * (EC_DATA), the host may be required to wait additional guarding
  77. * time before checking the SCI_EVT again. Such guarding may not be
  78. * necessary if the host is notified via another IRQ.
  79. */
  80. #define ACPI_EC_EVT_TIMING_STATUS 0x00
  81. #define ACPI_EC_EVT_TIMING_QUERY 0x01
  82. #define ACPI_EC_EVT_TIMING_EVENT 0x02
/* EC commands (command bytes written to EC_CMD) */
enum ec_command {
	ACPI_EC_COMMAND_READ = 0x80,	/* RD_EC: read a byte of EC space */
	ACPI_EC_COMMAND_WRITE = 0x81,	/* WR_EC: write a byte of EC space */
	ACPI_EC_BURST_ENABLE = 0x82,	/* BE_EC: enter burst mode */
	ACPI_EC_BURST_DISABLE = 0x83,	/* BD_EC: leave burst mode */
	ACPI_EC_COMMAND_QUERY = 0x84,	/* QR_EC: query pending event number */
};
  91. #define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
  92. #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
#define ACPI_EC_UDELAY_POLL 550 /* Wait 550us for EC transaction polling */
  94. #define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query
  95. * when trying to clear the EC */
  96. #define ACPI_EC_MAX_QUERIES 16 /* Maximum number of parallel queries */
  97. enum {
  98. EC_FLAGS_QUERY_ENABLED, /* Query is enabled */
  99. EC_FLAGS_QUERY_PENDING, /* Query is pending */
  100. EC_FLAGS_QUERY_GUARDING, /* Guard for SCI_EVT check */
  101. EC_FLAGS_GPE_HANDLER_INSTALLED, /* GPE handler installed */
  102. EC_FLAGS_EC_HANDLER_INSTALLED, /* OpReg handler installed */
  103. EC_FLAGS_EVT_HANDLER_INSTALLED, /* _Qxx handlers installed */
  104. EC_FLAGS_STARTED, /* Driver is started */
  105. EC_FLAGS_STOPPED, /* Driver is stopped */
  106. EC_FLAGS_COMMAND_STORM, /* GPE storms occurred to the
  107. * current command processing */
  108. };
  109. #define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */
  110. #define ACPI_EC_COMMAND_COMPLETE 0x02 /* Completed last byte */
  111. /* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
  112. static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
  113. module_param(ec_delay, uint, 0644);
  114. MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
  115. static unsigned int ec_max_queries __read_mostly = ACPI_EC_MAX_QUERIES;
  116. module_param(ec_max_queries, uint, 0644);
  117. MODULE_PARM_DESC(ec_max_queries, "Maximum parallel _Qxx evaluations");
  118. static bool ec_busy_polling __read_mostly;
  119. module_param(ec_busy_polling, bool, 0644);
  120. MODULE_PARM_DESC(ec_busy_polling, "Use busy polling to advance EC transaction");
  121. static unsigned int ec_polling_guard __read_mostly = ACPI_EC_UDELAY_POLL;
  122. module_param(ec_polling_guard, uint, 0644);
  123. MODULE_PARM_DESC(ec_polling_guard, "Guard time(us) between EC accesses in polling modes");
  124. static unsigned int ec_event_clearing __read_mostly = ACPI_EC_EVT_TIMING_QUERY;
/*
 * If the number of false interrupts per transaction exceeds this
 * threshold, the driver assumes a GPE storm is happening and disables
 * the GPE for the duration of normal transactions.
 */
  130. static unsigned int ec_storm_threshold __read_mostly = 8;
  131. module_param(ec_storm_threshold, uint, 0644);
  132. MODULE_PARM_DESC(ec_storm_threshold, "Maxim false GPE numbers not considered as GPE storm");
  133. static bool ec_freeze_events __read_mostly = false;
  134. module_param(ec_freeze_events, bool, 0644);
  135. MODULE_PARM_DESC(ec_freeze_events, "Disabling event handling during suspend/resume");
/* A registered handler for one _Qxx event number. */
struct acpi_ec_query_handler {
	struct list_head node;		/* link in the EC's handler list */
	acpi_ec_query_func func;	/* in-kernel callback, if any */
	acpi_handle handle;		/* ACPI handle used for evaluation */
	void *data;			/* opaque argument passed to func */
	u8 query_bit;			/* event number this handler serves */
	struct kref kref;		/* refcount; handler may outlive unregistration */
};
/* Progress state of a single in-flight EC command transaction. */
struct transaction {
	const u8 *wdata;		/* bytes to send after the command byte */
	u8 *rdata;			/* buffer receiving the EC's reply */
	unsigned short irq_count;	/* GPEs seen; input to storm detection */
	u8 command;			/* command byte (see enum ec_command) */
	u8 wi;				/* write index into wdata */
	u8 ri;				/* read index into rdata */
	u8 wlen;			/* total bytes to write */
	u8 rlen;			/* total bytes to read */
	u8 flags;			/* ACPI_EC_COMMAND_POLL/COMPLETE progress bits */
};
/* One queued query: the QR_EC transaction plus its deferred work. */
struct acpi_ec_query {
	struct transaction transaction;	/* the QR_EC command */
	struct work_struct work;	/* presumably runs acpi_ec_event_processor() — confirm */
	struct acpi_ec_query_handler *handler;	/* matched handler, if any */
};
  160. static int acpi_ec_query(struct acpi_ec *ec, u8 *data);
  161. static void advance_transaction(struct acpi_ec *ec);
  162. static void acpi_ec_event_handler(struct work_struct *work);
  163. static void acpi_ec_event_processor(struct work_struct *work);
  164. struct acpi_ec *boot_ec, *first_ec;
  165. EXPORT_SYMBOL(first_ec);
  166. static bool boot_ec_is_ecdt = false;
  167. static struct workqueue_struct *ec_query_wq;
  168. static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
  169. static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
  170. static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
  171. /* --------------------------------------------------------------------------
  172. * Logging/Debugging
  173. * -------------------------------------------------------------------------- */
  174. /*
  175. * Splitters used by the developers to track the boundary of the EC
  176. * handling processes.
  177. */
  178. #ifdef DEBUG
  179. #define EC_DBG_SEP " "
  180. #define EC_DBG_DRV "+++++"
  181. #define EC_DBG_STM "====="
  182. #define EC_DBG_REQ "*****"
  183. #define EC_DBG_EVT "#####"
  184. #else
  185. #define EC_DBG_SEP ""
  186. #define EC_DBG_DRV
  187. #define EC_DBG_STM
  188. #define EC_DBG_REQ
  189. #define EC_DBG_EVT
  190. #endif
  191. #define ec_log_raw(fmt, ...) \
  192. pr_info(fmt "\n", ##__VA_ARGS__)
  193. #define ec_dbg_raw(fmt, ...) \
  194. pr_debug(fmt "\n", ##__VA_ARGS__)
  195. #define ec_log(filter, fmt, ...) \
  196. ec_log_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
  197. #define ec_dbg(filter, fmt, ...) \
  198. ec_dbg_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
  199. #define ec_log_drv(fmt, ...) \
  200. ec_log(EC_DBG_DRV, fmt, ##__VA_ARGS__)
  201. #define ec_dbg_drv(fmt, ...) \
  202. ec_dbg(EC_DBG_DRV, fmt, ##__VA_ARGS__)
  203. #define ec_dbg_stm(fmt, ...) \
  204. ec_dbg(EC_DBG_STM, fmt, ##__VA_ARGS__)
  205. #define ec_dbg_req(fmt, ...) \
  206. ec_dbg(EC_DBG_REQ, fmt, ##__VA_ARGS__)
  207. #define ec_dbg_evt(fmt, ...) \
  208. ec_dbg(EC_DBG_EVT, fmt, ##__VA_ARGS__)
  209. #define ec_dbg_ref(ec, fmt, ...) \
  210. ec_dbg_raw("%lu: " fmt, ec->reference_count, ## __VA_ARGS__)
  211. /* --------------------------------------------------------------------------
  212. * Device Flags
  213. * -------------------------------------------------------------------------- */
  214. static bool acpi_ec_started(struct acpi_ec *ec)
  215. {
  216. return test_bit(EC_FLAGS_STARTED, &ec->flags) &&
  217. !test_bit(EC_FLAGS_STOPPED, &ec->flags);
  218. }
  219. static bool acpi_ec_event_enabled(struct acpi_ec *ec)
  220. {
  221. /*
  222. * There is an OSPM early stage logic. During the early stages
  223. * (boot/resume), OSPMs shouldn't enable the event handling, only
  224. * the EC transactions are allowed to be performed.
  225. */
  226. if (!test_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
  227. return false;
  228. /*
  229. * However, disabling the event handling is experimental for late
  230. * stage (suspend), and is controlled by the boot parameter of
  231. * "ec_freeze_events":
  232. * 1. true: The EC event handling is disabled before entering
  233. * the noirq stage.
  234. * 2. false: The EC event handling is automatically disabled as
  235. * soon as the EC driver is stopped.
  236. */
  237. if (ec_freeze_events)
  238. return acpi_ec_started(ec);
  239. else
  240. return test_bit(EC_FLAGS_STARTED, &ec->flags);
  241. }
/*
 * True when only the driver's own base reference remains, i.e. every
 * submitted request has completed and the EC is considered flushed.
 */
static bool acpi_ec_flushed(struct acpi_ec *ec)
{
	return ec->reference_count == 1;
}
  246. /* --------------------------------------------------------------------------
  247. * EC Registers
  248. * -------------------------------------------------------------------------- */
  249. static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
  250. {
  251. u8 x = inb(ec->command_addr);
  252. ec_dbg_raw("EC_SC(R) = 0x%2.2x "
  253. "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d",
  254. x,
  255. !!(x & ACPI_EC_FLAG_SCI),
  256. !!(x & ACPI_EC_FLAG_BURST),
  257. !!(x & ACPI_EC_FLAG_CMD),
  258. !!(x & ACPI_EC_FLAG_IBF),
  259. !!(x & ACPI_EC_FLAG_OBF));
  260. return x;
  261. }
  262. static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
  263. {
  264. u8 x = inb(ec->data_addr);
  265. ec->timestamp = jiffies;
  266. ec_dbg_raw("EC_DATA(R) = 0x%2.2x", x);
  267. return x;
  268. }
/* Write a command byte to EC_CMD, refreshing the access timestamp. */
static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
{
	/* Log the command byte before issuing it to the port. */
	ec_dbg_raw("EC_SC(W) = 0x%2.2x", command);
	outb(command, ec->command_addr);
	/* ec_guard() spaces accesses by polling_guard from this time. */
	ec->timestamp = jiffies;
}
/* Write a data byte to EC_DATA, refreshing the access timestamp. */
static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
{
	/* Log the data byte before issuing it to the port. */
	ec_dbg_raw("EC_DATA(W) = 0x%2.2x", data);
	outb(data, ec->data_addr);
	/* ec_guard() spaces accesses by polling_guard from this time. */
	ec->timestamp = jiffies;
}
  281. #ifdef DEBUG
  282. static const char *acpi_ec_cmd_string(u8 cmd)
  283. {
  284. switch (cmd) {
  285. case 0x80:
  286. return "RD_EC";
  287. case 0x81:
  288. return "WR_EC";
  289. case 0x82:
  290. return "BE_EC";
  291. case 0x83:
  292. return "BD_EC";
  293. case 0x84:
  294. return "QR_EC";
  295. }
  296. return "UNKNOWN";
  297. }
  298. #else
  299. #define acpi_ec_cmd_string(cmd) "UNDEF"
  300. #endif
  301. /* --------------------------------------------------------------------------
  302. * GPE Registers
  303. * -------------------------------------------------------------------------- */
  304. static inline bool acpi_ec_is_gpe_raised(struct acpi_ec *ec)
  305. {
  306. acpi_event_status gpe_status = 0;
  307. (void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status);
  308. return (gpe_status & ACPI_EVENT_FLAG_STATUS_SET) ? true : false;
  309. }
  310. static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
  311. {
  312. if (open)
  313. acpi_enable_gpe(NULL, ec->gpe);
  314. else {
  315. BUG_ON(ec->reference_count < 1);
  316. acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
  317. }
  318. if (acpi_ec_is_gpe_raised(ec)) {
  319. /*
  320. * On some platforms, EN=1 writes cannot trigger GPE. So
  321. * software need to manually trigger a pseudo GPE event on
  322. * EN=1 writes.
  323. */
  324. ec_dbg_raw("Polling quirk");
  325. advance_transaction(ec);
  326. }
  327. }
  328. static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close)
  329. {
  330. if (close)
  331. acpi_disable_gpe(NULL, ec->gpe);
  332. else {
  333. BUG_ON(ec->reference_count < 1);
  334. acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
  335. }
  336. }
  337. static inline void acpi_ec_clear_gpe(struct acpi_ec *ec)
  338. {
  339. /*
  340. * GPE STS is a W1C register, which means:
  341. * 1. Software can clear it without worrying about clearing other
  342. * GPEs' STS bits when the hardware sets them in parallel.
  343. * 2. As long as software can ensure only clearing it when it is
  344. * set, hardware won't set it in parallel.
  345. * So software can clear GPE in any contexts.
  346. * Warning: do not move the check into advance_transaction() as the
  347. * EC commands will be sent without GPE raised.
  348. */
  349. if (!acpi_ec_is_gpe_raised(ec))
  350. return;
  351. acpi_clear_gpe(NULL, ec->gpe);
  352. }
  353. /* --------------------------------------------------------------------------
  354. * Transaction Management
  355. * -------------------------------------------------------------------------- */
  356. static void acpi_ec_submit_request(struct acpi_ec *ec)
  357. {
  358. ec->reference_count++;
  359. if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags) &&
  360. ec->reference_count == 1)
  361. acpi_ec_enable_gpe(ec, true);
  362. }
  363. static void acpi_ec_complete_request(struct acpi_ec *ec)
  364. {
  365. bool flushed = false;
  366. ec->reference_count--;
  367. if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags) &&
  368. ec->reference_count == 0)
  369. acpi_ec_disable_gpe(ec, true);
  370. flushed = acpi_ec_flushed(ec);
  371. if (flushed)
  372. wake_up(&ec->wait);
  373. }
  374. static void acpi_ec_set_storm(struct acpi_ec *ec, u8 flag)
  375. {
  376. if (!test_bit(flag, &ec->flags)) {
  377. acpi_ec_disable_gpe(ec, false);
  378. ec_dbg_drv("Polling enabled");
  379. set_bit(flag, &ec->flags);
  380. }
  381. }
  382. static void acpi_ec_clear_storm(struct acpi_ec *ec, u8 flag)
  383. {
  384. if (test_bit(flag, &ec->flags)) {
  385. clear_bit(flag, &ec->flags);
  386. acpi_ec_enable_gpe(ec, false);
  387. ec_dbg_drv("Polling disabled");
  388. }
  389. }
  390. /*
  391. * acpi_ec_submit_flushable_request() - Increase the reference count unless
  392. * the flush operation is not in
  393. * progress
  394. * @ec: the EC device
  395. *
  396. * This function must be used before taking a new action that should hold
  397. * the reference count. If this function returns false, then the action
  398. * must be discarded or it will prevent the flush operation from being
  399. * completed.
  400. */
  401. static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
  402. {
  403. if (!acpi_ec_started(ec))
  404. return false;
  405. acpi_ec_submit_request(ec);
  406. return true;
  407. }
  408. static void acpi_ec_submit_query(struct acpi_ec *ec)
  409. {
  410. if (acpi_ec_event_enabled(ec) &&
  411. !test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
  412. ec_dbg_evt("Command(%s) submitted/blocked",
  413. acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
  414. ec->nr_pending_queries++;
  415. schedule_work(&ec->work);
  416. }
  417. }
  418. static void acpi_ec_complete_query(struct acpi_ec *ec)
  419. {
  420. if (test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
  421. clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
  422. ec_dbg_evt("Command(%s) unblocked",
  423. acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
  424. }
  425. }
/* Unblock _Qxx event handling (caller holds ec->lock). */
static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
{
	if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
		ec_log_drv("event unblocked");
	/*
	 * Unconditionally invoke this once after enabling the event
	 * handling mechanism to detect the pending events.
	 */
	advance_transaction(ec);
}
/* Block _Qxx event handling; counterpart of __acpi_ec_enable_event(). */
static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
{
	if (test_and_clear_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
		ec_log_drv("event blocked");
}
  441. /*
  442. * Process _Q events that might have accumulated in the EC.
  443. * Run with locked ec mutex.
  444. */
  445. static void acpi_ec_clear(struct acpi_ec *ec)
  446. {
  447. int i, status;
  448. u8 value = 0;
  449. for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
  450. status = acpi_ec_query(ec, &value);
  451. if (status || !value)
  452. break;
  453. }
  454. if (unlikely(i == ACPI_EC_CLEAR_MAX))
  455. pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
  456. else
  457. pr_info("%d stale EC events cleared\n", i);
  458. }
/* Enable event handling once the driver has started; then drain stale events. */
static void acpi_ec_enable_event(struct acpi_ec *ec)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (acpi_ec_started(ec))
		__acpi_ec_enable_event(ec);
	spin_unlock_irqrestore(&ec->lock, flags);

	/* Drain additional events if hardware requires that */
	if (EC_FLAGS_CLEAR_ON_RESUME)
		acpi_ec_clear(ec);
}
  470. #ifdef CONFIG_PM_SLEEP
  471. static bool acpi_ec_query_flushed(struct acpi_ec *ec)
  472. {
  473. bool flushed;
  474. unsigned long flags;
  475. spin_lock_irqsave(&ec->lock, flags);
  476. flushed = !ec->nr_pending_queries;
  477. spin_unlock_irqrestore(&ec->lock, flags);
  478. return flushed;
  479. }
/* May sleep: wait for pending queries, then flush the query workqueue. */
static void __acpi_ec_flush_event(struct acpi_ec *ec)
{
	/*
	 * When ec_freeze_events is true, we need to flush events in
	 * the proper position before entering the noirq stage.
	 */
	wait_event(ec->wait, acpi_ec_query_flushed(ec));
	if (ec_query_wq)
		flush_workqueue(ec_query_wq);
}
/* Block event handling, then wait until queued queries have drained. */
static void acpi_ec_disable_event(struct acpi_ec *ec)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	__acpi_ec_disable_event(ec);
	spin_unlock_irqrestore(&ec->lock, flags);
	/* Flush outside the lock: __acpi_ec_flush_event() may sleep. */
	__acpi_ec_flush_event(ec);
}
  498. #endif /* CONFIG_PM_SLEEP */
  499. static bool acpi_ec_guard_event(struct acpi_ec *ec)
  500. {
  501. bool guarded = true;
  502. unsigned long flags;
  503. spin_lock_irqsave(&ec->lock, flags);
  504. /*
  505. * If firmware SCI_EVT clearing timing is "event", we actually
  506. * don't know when the SCI_EVT will be cleared by firmware after
  507. * evaluating _Qxx, so we need to re-check SCI_EVT after waiting an
  508. * acceptable period.
  509. *
  510. * The guarding period begins when EC_FLAGS_QUERY_PENDING is
  511. * flagged, which means SCI_EVT check has just been performed.
  512. * But if the current transaction is ACPI_EC_COMMAND_QUERY, the
  513. * guarding should have already been performed (via
  514. * EC_FLAGS_QUERY_GUARDING) and should not be applied so that the
  515. * ACPI_EC_COMMAND_QUERY transaction can be transitioned into
  516. * ACPI_EC_COMMAND_POLL state immediately.
  517. */
  518. if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
  519. ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY ||
  520. !test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags) ||
  521. (ec->curr && ec->curr->command == ACPI_EC_COMMAND_QUERY))
  522. guarded = false;
  523. spin_unlock_irqrestore(&ec->lock, flags);
  524. return guarded;
  525. }
  526. static int ec_transaction_polled(struct acpi_ec *ec)
  527. {
  528. unsigned long flags;
  529. int ret = 0;
  530. spin_lock_irqsave(&ec->lock, flags);
  531. if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
  532. ret = 1;
  533. spin_unlock_irqrestore(&ec->lock, flags);
  534. return ret;
  535. }
  536. static int ec_transaction_completed(struct acpi_ec *ec)
  537. {
  538. unsigned long flags;
  539. int ret = 0;
  540. spin_lock_irqsave(&ec->lock, flags);
  541. if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
  542. ret = 1;
  543. spin_unlock_irqrestore(&ec->lock, flags);
  544. return ret;
  545. }
  546. static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long flag)
  547. {
  548. ec->curr->flags |= flag;
  549. if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
  550. if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS &&
  551. flag == ACPI_EC_COMMAND_POLL)
  552. acpi_ec_complete_query(ec);
  553. if (ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY &&
  554. flag == ACPI_EC_COMMAND_COMPLETE)
  555. acpi_ec_complete_query(ec);
  556. if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
  557. flag == ACPI_EC_COMMAND_COMPLETE)
  558. set_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
  559. }
  560. }
/*
 * Single step of the EC transaction state machine: clear the GPE STS,
 * sample EC_SC, and move the current transaction forward by at most one
 * byte of I/O.  Runs from both the GPE handler and the polling path.
 * NOTE(review): callers appear to serialize via ec->lock — confirm.
 */
static void advance_transaction(struct acpi_ec *ec)
{
	struct transaction *t;
	u8 status;
	bool wakeup = false;

	ec_dbg_stm("%s (%d)", in_interrupt() ? "IRQ" : "TASK",
		   smp_processor_id());
	/*
	 * By always clearing STS before handling all indications, we can
	 * ensure a hardware STS 0->1 change after this clearing can always
	 * trigger a GPE interrupt.
	 */
	acpi_ec_clear_gpe(ec);
	status = acpi_ec_read_status(ec);
	t = ec->curr;
	/*
	 * Another IRQ or a guarded polling mode advancement is detected,
	 * the next QR_EC submission is then allowed.
	 */
	if (!t || !(t->flags & ACPI_EC_COMMAND_POLL)) {
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
		    (!ec->nr_pending_queries ||
		     test_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags))) {
			clear_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
			acpi_ec_complete_query(ec);
		}
	}
	if (!t)
		goto err;
	if (t->flags & ACPI_EC_COMMAND_POLL) {
		/* Command byte already sent; move write/read bytes. */
		if (t->wlen > t->wi) {
			/* Write the next byte once IBF is clear. */
			if ((status & ACPI_EC_FLAG_IBF) == 0)
				acpi_ec_write_data(ec, t->wdata[t->wi++]);
			else
				goto err;
		} else if (t->rlen > t->ri) {
			/* Read the next byte once OBF is set. */
			if ((status & ACPI_EC_FLAG_OBF) == 1) {
				t->rdata[t->ri++] = acpi_ec_read_data(ec);
				if (t->rlen == t->ri) {
					ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
					if (t->command == ACPI_EC_COMMAND_QUERY)
						ec_dbg_evt("Command(%s) completed by hardware",
							   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
					wakeup = true;
				}
			} else
				goto err;
		} else if (t->wlen == t->wi &&
			   (status & ACPI_EC_FLAG_IBF) == 0) {
			/* Write-only transaction: last byte consumed. */
			ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
			wakeup = true;
		}
		goto out;
	} else {
		/*
		 * Command byte not yet sent.  QUERY_HANDSHAKE quirk:
		 * complete QR_EC in software with 0x00 ("no event") when
		 * SCI_EVT is no longer set.
		 */
		if (EC_FLAGS_QUERY_HANDSHAKE &&
		    !(status & ACPI_EC_FLAG_SCI) &&
		    (t->command == ACPI_EC_COMMAND_QUERY)) {
			ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
			t->rdata[t->ri++] = 0x00;
			ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
			ec_dbg_evt("Command(%s) completed by software",
				   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
			wakeup = true;
		} else if ((status & ACPI_EC_FLAG_IBF) == 0) {
			acpi_ec_write_cmd(ec, t->command);
			ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
		} else
			goto err;
		goto out;
	}
err:
	/*
	 * If SCI bit is set, then don't think it's a false IRQ
	 * otherwise will take a not handled IRQ as a false one.
	 */
	if (!(status & ACPI_EC_FLAG_SCI)) {
		if (in_interrupt() && t) {
			if (t->irq_count < ec_storm_threshold)
				++t->irq_count;
			/* Allow triggering on 0 threshold */
			if (t->irq_count == ec_storm_threshold)
				acpi_ec_set_storm(ec, EC_FLAGS_COMMAND_STORM);
		}
	}
out:
	/* SCI_EVT observed at any step schedules a query. */
	if (status & ACPI_EC_FLAG_SCI)
		acpi_ec_submit_query(ec);
	if (wakeup && in_interrupt())
		wake_up(&ec->wait);
}
/* Reset the current transaction's progress counters before (re)issuing it. */
static void start_transaction(struct acpi_ec *ec)
{
	/*
	 * Clear IRQ accounting and read/write indices; clearing flags puts
	 * the state machine back into the "command not yet written" state.
	 */
	ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
	ec->curr->flags = 0;
}
/*
 * Wait for the current transaction to complete while honouring the
 * per-EC guarding interval (ec->polling_guard) between status accesses.
 *
 * Returns 0 when the transaction completed, -ETIME when the deadline
 * (ec->timestamp + guard) passed without completion.
 */
static int ec_guard(struct acpi_ec *ec)
{
	unsigned long guard = usecs_to_jiffies(ec->polling_guard);
	unsigned long timeout = ec->timestamp + guard;

	/* Ensure guarding period before polling EC status */
	do {
		if (ec->busy_polling) {
			/* Perform busy polling */
			if (ec_transaction_completed(ec))
				return 0;
			udelay(jiffies_to_usecs(guard));
		} else {
			/*
			 * Perform wait polling
			 * 1. Wait the transaction to be completed by the
			 *    GPE handler after the transaction enters
			 *    ACPI_EC_COMMAND_POLL state.
			 * 2. A special guarding logic is also required
			 *    for event clearing mode "event" before the
			 *    transaction enters ACPI_EC_COMMAND_POLL
			 *    state.
			 */
			if (!ec_transaction_polled(ec) &&
			    !acpi_ec_guard_event(ec))
				break;
			if (wait_event_timeout(ec->wait,
					       ec_transaction_completed(ec),
					       guard))
				return 0;
		}
	} while (time_before(jiffies, timeout));
	return -ETIME;
}
/*
 * Drive a started transaction to completion, restarting the command up to
 * 5 times (ec_delay ms each) when the controller appears wedged.
 * Returns 0 on completion, -ETIME once every restart window expires.
 */
static int ec_poll(struct acpi_ec *ec)
{
	unsigned long flags;
	int repeat = 5; /* number of command restarts */

	while (repeat--) {
		unsigned long delay = jiffies +
			msecs_to_jiffies(ec_delay);
		do {
			if (!ec_guard(ec))
				return 0;
			/* No progress via IRQ: advance the state machine manually */
			spin_lock_irqsave(&ec->lock, flags);
			advance_transaction(ec);
			spin_unlock_irqrestore(&ec->lock, flags);
		} while (time_before(jiffies, delay));
		pr_debug("controller reset, restart transaction\n");
		spin_lock_irqsave(&ec->lock, flags);
		start_transaction(ec);
		spin_unlock_irqrestore(&ec->lock, flags);
	}
	return -ETIME;
}
/*
 * Execute one transaction on @ec with ec->mutex (and the ACPI global lock,
 * when required) already held by the caller. Returns 0 on success, -EINVAL
 * when the driver is stopping and refuses new requests, or the ec_poll()
 * error.
 */
static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
					struct transaction *t)
{
	unsigned long tmp;
	int ret = 0;

	/* start transaction */
	spin_lock_irqsave(&ec->lock, tmp);
	/* Enable GPE for command processing (IBF=0/OBF=1) */
	if (!acpi_ec_submit_flushable_request(ec)) {
		ret = -EINVAL;
		goto unlock;
	}
	ec_dbg_ref(ec, "Increase command");
	/* following two actions should be kept atomic */
	ec->curr = t;
	ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
	start_transaction(ec);
	spin_unlock_irqrestore(&ec->lock, tmp);

	ret = ec_poll(ec);

	spin_lock_irqsave(&ec->lock, tmp);
	/* Clear the storm marker if this command is the one that raised it */
	if (t->irq_count == ec_storm_threshold)
		acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM);
	ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
	ec->curr = NULL;
	/* Disable GPE for command processing (IBF=0/OBF=1) */
	acpi_ec_complete_request(ec);
	ec_dbg_ref(ec, "Decrease command");
unlock:
	spin_unlock_irqrestore(&ec->lock, tmp);
	return ret;
}
/*
 * Validate @t, serialize on ec->mutex (taking the ACPI global lock when
 * the EC declares _GLK), and carry out the transaction.
 */
static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
{
	int status;
	u32 glk;

	if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
		return -EINVAL;
	/* Zero the read buffer so short reads never expose stale data */
	if (t->rdata)
		memset(t->rdata, 0, t->rlen);

	mutex_lock(&ec->mutex);
	if (ec->global_lock) {
		status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
		if (ACPI_FAILURE(status)) {
			status = -ENODEV;
			goto unlock;
		}
	}
	status = acpi_ec_transaction_unlocked(ec, t);
	if (ec->global_lock)
		acpi_release_global_lock(glk);
unlock:
	mutex_unlock(&ec->mutex);
	return status;
}
  764. static int acpi_ec_burst_enable(struct acpi_ec *ec)
  765. {
  766. u8 d;
  767. struct transaction t = {.command = ACPI_EC_BURST_ENABLE,
  768. .wdata = NULL, .rdata = &d,
  769. .wlen = 0, .rlen = 1};
  770. return acpi_ec_transaction(ec, &t);
  771. }
  772. static int acpi_ec_burst_disable(struct acpi_ec *ec)
  773. {
  774. struct transaction t = {.command = ACPI_EC_BURST_DISABLE,
  775. .wdata = NULL, .rdata = NULL,
  776. .wlen = 0, .rlen = 0};
  777. return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
  778. acpi_ec_transaction(ec, &t) : 0;
  779. }
  780. static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
  781. {
  782. int result;
  783. u8 d;
  784. struct transaction t = {.command = ACPI_EC_COMMAND_READ,
  785. .wdata = &address, .rdata = &d,
  786. .wlen = 1, .rlen = 1};
  787. result = acpi_ec_transaction(ec, &t);
  788. *data = d;
  789. return result;
  790. }
  791. static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
  792. {
  793. u8 wdata[2] = { address, data };
  794. struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
  795. .wdata = wdata, .rdata = NULL,
  796. .wlen = 2, .rlen = 0};
  797. return acpi_ec_transaction(ec, &t);
  798. }
  799. int ec_read(u8 addr, u8 *val)
  800. {
  801. int err;
  802. u8 temp_data;
  803. if (!first_ec)
  804. return -ENODEV;
  805. err = acpi_ec_read(first_ec, addr, &temp_data);
  806. if (!err) {
  807. *val = temp_data;
  808. return 0;
  809. }
  810. return err;
  811. }
  812. EXPORT_SYMBOL(ec_read);
  813. int ec_write(u8 addr, u8 val)
  814. {
  815. int err;
  816. if (!first_ec)
  817. return -ENODEV;
  818. err = acpi_ec_write(first_ec, addr, val);
  819. return err;
  820. }
  821. EXPORT_SYMBOL(ec_write);
  822. int ec_transaction(u8 command,
  823. const u8 *wdata, unsigned wdata_len,
  824. u8 *rdata, unsigned rdata_len)
  825. {
  826. struct transaction t = {.command = command,
  827. .wdata = wdata, .rdata = rdata,
  828. .wlen = wdata_len, .rlen = rdata_len};
  829. if (!first_ec)
  830. return -ENODEV;
  831. return acpi_ec_transaction(first_ec, &t);
  832. }
  833. EXPORT_SYMBOL(ec_transaction);
  834. /* Get the handle to the EC device */
  835. acpi_handle ec_get_handle(void)
  836. {
  837. if (!first_ec)
  838. return NULL;
  839. return first_ec->handle;
  840. }
  841. EXPORT_SYMBOL(ec_get_handle);
/*
 * Mark the EC as started; unless @resuming, also submit a driver
 * reference so the GPE stays enabled for event processing.
 */
static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	/* test_and_set_bit() makes a second start a harmless no-op */
	if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
		ec_dbg_drv("Starting EC");
		/* Enable GPE for event processing (SCI_EVT=1) */
		if (!resuming) {
			acpi_ec_submit_request(ec);
			ec_dbg_ref(ec, "Increase driver");
		}
		ec_log_drv("EC started");
	}
	spin_unlock_irqrestore(&ec->lock, flags);
}
  857. static bool acpi_ec_stopped(struct acpi_ec *ec)
  858. {
  859. unsigned long flags;
  860. bool flushed;
  861. spin_lock_irqsave(&ec->lock, flags);
  862. flushed = acpi_ec_flushed(ec);
  863. spin_unlock_irqrestore(&ec->lock, flags);
  864. return flushed;
  865. }
/*
 * Stop the EC: flag it as stopping, wait (with the lock dropped) until all
 * in-flight requests have flushed, then drop the driver reference — unless
 * @suspending, in which case events may instead be disabled outright.
 */
static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (acpi_ec_started(ec)) {
		ec_dbg_drv("Stopping EC");
		set_bit(EC_FLAGS_STOPPED, &ec->flags);
		/* Must drop the lock while sleeping on the flush */
		spin_unlock_irqrestore(&ec->lock, flags);
		wait_event(ec->wait, acpi_ec_stopped(ec));
		spin_lock_irqsave(&ec->lock, flags);
		/* Disable GPE for event processing (SCI_EVT=1) */
		if (!suspending) {
			acpi_ec_complete_request(ec);
			ec_dbg_ref(ec, "Decrease driver");
		} else if (!ec_freeze_events)
			__acpi_ec_disable_event(ec);
		clear_bit(EC_FLAGS_STARTED, &ec->flags);
		clear_bit(EC_FLAGS_STOPPED, &ec->flags);
		ec_log_drv("EC stopped");
	}
	spin_unlock_irqrestore(&ec->lock, flags);
}
  888. static void acpi_ec_enter_noirq(struct acpi_ec *ec)
  889. {
  890. unsigned long flags;
  891. spin_lock_irqsave(&ec->lock, flags);
  892. ec->busy_polling = true;
  893. ec->polling_guard = 0;
  894. ec_log_drv("interrupt blocked");
  895. spin_unlock_irqrestore(&ec->lock, flags);
  896. }
  897. static void acpi_ec_leave_noirq(struct acpi_ec *ec)
  898. {
  899. unsigned long flags;
  900. spin_lock_irqsave(&ec->lock, flags);
  901. ec->busy_polling = ec_busy_polling;
  902. ec->polling_guard = ec_polling_guard;
  903. ec_log_drv("interrupt unblocked");
  904. spin_unlock_irqrestore(&ec->lock, flags);
  905. }
  906. void acpi_ec_block_transactions(void)
  907. {
  908. struct acpi_ec *ec = first_ec;
  909. if (!ec)
  910. return;
  911. mutex_lock(&ec->mutex);
  912. /* Prevent transactions from being carried out */
  913. acpi_ec_stop(ec, true);
  914. mutex_unlock(&ec->mutex);
  915. }
  916. void acpi_ec_unblock_transactions(void)
  917. {
  918. /*
  919. * Allow transactions to happen again (this function is called from
  920. * atomic context during wakeup, so we don't need to acquire the mutex).
  921. */
  922. if (first_ec)
  923. acpi_ec_start(first_ec, true);
  924. }
  925. /* --------------------------------------------------------------------------
  926. Event Management
  927. -------------------------------------------------------------------------- */
  928. static struct acpi_ec_query_handler *
  929. acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler)
  930. {
  931. if (handler)
  932. kref_get(&handler->kref);
  933. return handler;
  934. }
/*
 * Look up the handler registered for query @value and return it with an
 * extra reference held, or NULL when none is registered.
 */
static struct acpi_ec_query_handler *
acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
{
	struct acpi_ec_query_handler *handler;
	bool found = false;

	mutex_lock(&ec->mutex);
	list_for_each_entry(handler, &ec->list, node) {
		if (value == handler->query_bit) {
			found = true;
			break;
		}
	}
	mutex_unlock(&ec->mutex);
	/* "handler" only points at a real entry if the loop broke early */
	return found ? acpi_ec_get_query_handler(handler) : NULL;
}
/* kref release callback: frees the handler once the last reference drops. */
static void acpi_ec_query_handler_release(struct kref *kref)
{
	struct acpi_ec_query_handler *handler =
		container_of(kref, struct acpi_ec_query_handler, kref);

	kfree(handler);
}
/* Drop a reference taken by acpi_ec_get_query_handler*(). */
static void acpi_ec_put_query_handler(struct acpi_ec_query_handler *handler)
{
	kref_put(&handler->kref, acpi_ec_query_handler_release);
}
/*
 * Register a handler for query event @query_bit. Either @func (invoked
 * with @data) or @handle (an ACPI method to evaluate) services the event.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
			      acpi_handle handle, acpi_ec_query_func func,
			      void *data)
{
	struct acpi_ec_query_handler *handler =
		kzalloc(sizeof(struct acpi_ec_query_handler), GFP_KERNEL);

	if (!handler)
		return -ENOMEM;

	handler->query_bit = query_bit;
	handler->handle = handle;
	handler->func = func;
	handler->data = data;
	mutex_lock(&ec->mutex);
	kref_init(&handler->kref);
	list_add(&handler->node, &ec->list);
	mutex_unlock(&ec->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);
  979. static void acpi_ec_remove_query_handlers(struct acpi_ec *ec,
  980. bool remove_all, u8 query_bit)
  981. {
  982. struct acpi_ec_query_handler *handler, *tmp;
  983. LIST_HEAD(free_list);
  984. mutex_lock(&ec->mutex);
  985. list_for_each_entry_safe(handler, tmp, &ec->list, node) {
  986. if (remove_all || query_bit == handler->query_bit) {
  987. list_del_init(&handler->node);
  988. list_add(&handler->node, &free_list);
  989. }
  990. }
  991. mutex_unlock(&ec->mutex);
  992. list_for_each_entry_safe(handler, tmp, &free_list, node)
  993. acpi_ec_put_query_handler(handler);
  994. }
/* Unregister every handler previously registered for @query_bit. */
void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
{
	acpi_ec_remove_query_handlers(ec, false, query_bit);
}
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
  1000. static struct acpi_ec_query *acpi_ec_create_query(u8 *pval)
  1001. {
  1002. struct acpi_ec_query *q;
  1003. struct transaction *t;
  1004. q = kzalloc(sizeof (struct acpi_ec_query), GFP_KERNEL);
  1005. if (!q)
  1006. return NULL;
  1007. INIT_WORK(&q->work, acpi_ec_event_processor);
  1008. t = &q->transaction;
  1009. t->command = ACPI_EC_COMMAND_QUERY;
  1010. t->rdata = pval;
  1011. t->rlen = 1;
  1012. return q;
  1013. }
  1014. static void acpi_ec_delete_query(struct acpi_ec_query *q)
  1015. {
  1016. if (q) {
  1017. if (q->handler)
  1018. acpi_ec_put_query_handler(q->handler);
  1019. kfree(q);
  1020. }
  1021. }
/*
 * Workqueue callback servicing one query event: invoke the registered
 * handler function, or evaluate the associated _Qxx method, then free the
 * query (which also drops the handler reference).
 */
static void acpi_ec_event_processor(struct work_struct *work)
{
	struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
	struct acpi_ec_query_handler *handler = q->handler;

	ec_dbg_evt("Query(0x%02x) started", handler->query_bit);
	if (handler->func)
		handler->func(handler->data);
	else if (handler->handle)
		acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
	ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
	acpi_ec_delete_query(q);
}
  1034. static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
  1035. {
  1036. u8 value = 0;
  1037. int result;
  1038. struct acpi_ec_query *q;
  1039. q = acpi_ec_create_query(&value);
  1040. if (!q)
  1041. return -ENOMEM;
  1042. /*
  1043. * Query the EC to find out which _Qxx method we need to evaluate.
  1044. * Note that successful completion of the query causes the ACPI_EC_SCI
  1045. * bit to be cleared (and thus clearing the interrupt source).
  1046. */
  1047. result = acpi_ec_transaction(ec, &q->transaction);
  1048. if (!value)
  1049. result = -ENODATA;
  1050. if (result)
  1051. goto err_exit;
  1052. q->handler = acpi_ec_get_query_handler_by_value(ec, value);
  1053. if (!q->handler) {
  1054. result = -ENODATA;
  1055. goto err_exit;
  1056. }
  1057. /*
  1058. * It is reported that _Qxx are evaluated in a parallel way on
  1059. * Windows:
  1060. * https://bugzilla.kernel.org/show_bug.cgi?id=94411
  1061. *
  1062. * Put this log entry before schedule_work() in order to make
  1063. * it appearing before any other log entries occurred during the
  1064. * work queue execution.
  1065. */
  1066. ec_dbg_evt("Query(0x%02x) scheduled", value);
  1067. if (!queue_work(ec_query_wq, &q->work)) {
  1068. ec_dbg_evt("Query(0x%02x) overlapped", value);
  1069. result = -EBUSY;
  1070. }
  1071. err_exit:
  1072. if (result)
  1073. acpi_ec_delete_query(q);
  1074. if (data)
  1075. *data = value;
  1076. return result;
  1077. }
/*
 * For the "event" clearing timing only: after event handling, wait out the
 * guard once more and, if it expires with no transaction in flight,
 * advance the state machine manually so a pending SCI_EVT is not lost.
 */
static void acpi_ec_check_event(struct acpi_ec *ec)
{
	unsigned long flags;

	if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) {
		if (ec_guard(ec)) {
			spin_lock_irqsave(&ec->lock, flags);
			/*
			 * Take care of the SCI_EVT unless no one else is
			 * taking care of it.
			 */
			if (!ec->curr)
				advance_transaction(ec);
			spin_unlock_irqrestore(&ec->lock, flags);
		}
	}
}
/*
 * Workqueue callback draining all pending query events. The spinlock is
 * dropped around each QR_EC transaction and re-taken only to update the
 * pending-query count.
 */
static void acpi_ec_event_handler(struct work_struct *work)
{
	unsigned long flags;
	struct acpi_ec *ec = container_of(work, struct acpi_ec, work);

	ec_dbg_evt("Event started");

	spin_lock_irqsave(&ec->lock, flags);
	while (ec->nr_pending_queries) {
		spin_unlock_irqrestore(&ec->lock, flags);
		/* Failures are tolerated; the count is decremented anyway */
		(void)acpi_ec_query(ec, NULL);
		spin_lock_irqsave(&ec->lock, flags);
		ec->nr_pending_queries--;
		/*
		 * Before exit, make sure that this work item can be
		 * scheduled again. There might be QR_EC failures, leaving
		 * EC_FLAGS_QUERY_PENDING uncleared and preventing this work
		 * item from being scheduled again.
		 */
		if (!ec->nr_pending_queries) {
			if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
			    ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY)
				acpi_ec_complete_query(ec);
		}
	}
	spin_unlock_irqrestore(&ec->lock, flags);

	ec_dbg_evt("Event stopped");

	acpi_ec_check_event(ec);
}
  1121. static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
  1122. u32 gpe_number, void *data)
  1123. {
  1124. unsigned long flags;
  1125. struct acpi_ec *ec = data;
  1126. spin_lock_irqsave(&ec->lock, flags);
  1127. advance_transaction(ec);
  1128. spin_unlock_irqrestore(&ec->lock, flags);
  1129. return ACPI_INTERRUPT_HANDLED;
  1130. }
  1131. /* --------------------------------------------------------------------------
  1132. * Address Space Management
  1133. * -------------------------------------------------------------------------- */
  1134. static acpi_status
  1135. acpi_ec_space_handler(u32 function, acpi_physical_address address,
  1136. u32 bits, u64 *value64,
  1137. void *handler_context, void *region_context)
  1138. {
  1139. struct acpi_ec *ec = handler_context;
  1140. int result = 0, i, bytes = bits / 8;
  1141. u8 *value = (u8 *)value64;
  1142. if ((address > 0xFF) || !value || !handler_context)
  1143. return AE_BAD_PARAMETER;
  1144. if (function != ACPI_READ && function != ACPI_WRITE)
  1145. return AE_BAD_PARAMETER;
  1146. if (ec->busy_polling || bits > 8)
  1147. acpi_ec_burst_enable(ec);
  1148. for (i = 0; i < bytes; ++i, ++address, ++value)
  1149. result = (function == ACPI_READ) ?
  1150. acpi_ec_read(ec, address, value) :
  1151. acpi_ec_write(ec, address, *value);
  1152. if (ec->busy_polling || bits > 8)
  1153. acpi_ec_burst_disable(ec);
  1154. switch (result) {
  1155. case -EINVAL:
  1156. return AE_BAD_PARAMETER;
  1157. case -ENODEV:
  1158. return AE_NOT_FOUND;
  1159. case -ETIME:
  1160. return AE_TIME;
  1161. default:
  1162. return AE_OK;
  1163. }
  1164. }
  1165. /* --------------------------------------------------------------------------
  1166. * Driver Interface
  1167. * -------------------------------------------------------------------------- */
  1168. static acpi_status
  1169. ec_parse_io_ports(struct acpi_resource *resource, void *context);
/* Free an EC descriptor, clearing any global pointers that alias it. */
static void acpi_ec_free(struct acpi_ec *ec)
{
	if (first_ec == ec)
		first_ec = NULL;
	if (boot_ec == ec)
		boot_ec = NULL;
	kfree(ec);
}
/*
 * Allocate and initialize an EC descriptor. It starts in busy-polling
 * mode with no guard interval; interrupt-driven mode is enabled later,
 * once a GPE handler is installed (see acpi_ec_leave_noirq()).
 */
static struct acpi_ec *acpi_ec_alloc(void)
{
	struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL);

	if (!ec)
		return NULL;
	mutex_init(&ec->mutex);
	init_waitqueue_head(&ec->wait);
	INIT_LIST_HEAD(&ec->list);
	spin_lock_init(&ec->lock);
	INIT_WORK(&ec->work, acpi_ec_event_handler);
	ec->timestamp = jiffies;
	ec->busy_polling = true;
	ec->polling_guard = 0;
	return ec;
}
/*
 * Namespace-walk callback: when the visited method is named "_Qxx",
 * register it as the handler for query event 0xxx. Always returns AE_OK
 * so the walk continues over the remaining methods.
 */
static acpi_status
acpi_ec_register_query_methods(acpi_handle handle, u32 level,
			       void *context, void **return_value)
{
	char node_name[5];
	struct acpi_buffer buffer = { sizeof(node_name), node_name };
	struct acpi_ec *ec = context;
	int value = 0;
	acpi_status status;

	status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);
	if (ACPI_SUCCESS(status) && sscanf(node_name, "_Q%x", &value) == 1)
		acpi_ec_add_query_handler(ec, value, handle, NULL, NULL);
	return AE_OK;
}
/*
 * Evaluate an EC device's _CRS (I/O ports), _GPE and _GLK objects to
 * populate @context (a struct acpi_ec). Returns AE_CTRL_TERMINATE on
 * success so an enclosing acpi_get_devices() walk stops at the first EC.
 */
static acpi_status
ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
{
	acpi_status status;
	unsigned long long tmp = 0;
	struct acpi_ec *ec = context;

	/* clear addr values, ec_parse_io_ports depend on it */
	ec->command_addr = ec->data_addr = 0;
	status = acpi_walk_resources(handle, METHOD_NAME__CRS,
				     ec_parse_io_ports, ec);
	if (ACPI_FAILURE(status))
		return status;

	/* Get GPE bit assignment (EC events). */
	/* TODO: Add support for _GPE returning a package */
	status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
	if (ACPI_FAILURE(status))
		return status;
	ec->gpe = tmp;
	/* Use the global lock for all EC transactions? */
	tmp = 0;
	/* _GLK is optional; failure leaves tmp == 0 ("no global lock") */
	acpi_evaluate_integer(handle, "_GLK", NULL, &tmp);
	ec->global_lock = tmp;
	ec->handle = handle;
	return AE_CTRL_TERMINATE;
}
/*
 * Note: This function returns an error code only when the address space
 * handler is not installed, which means "not able to handle
 * transactions".
 */
static int ec_install_handlers(struct acpi_ec *ec, bool handle_events)
{
	acpi_status status;

	acpi_ec_start(ec, false);

	if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
		/* Stay in polling mode until a GPE handler is installed */
		acpi_ec_enter_noirq(ec);
		status = acpi_install_address_space_handler(ec->handle,
							    ACPI_ADR_SPACE_EC,
							    &acpi_ec_space_handler,
							    NULL, ec);
		if (ACPI_FAILURE(status)) {
			if (status == AE_NOT_FOUND) {
				/*
				 * Maybe OS fails in evaluating the _REG
				 * object. The AE_NOT_FOUND error will be
				 * ignored and the OS continues to
				 * initialize the EC.
				 */
				pr_err("Fail in evaluating the _REG object"
					" of EC device. Broken bios is suspected.\n");
			} else {
				acpi_ec_stop(ec, false);
				return -ENODEV;
			}
		}
		set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
	}

	if (!handle_events)
		return 0;

	if (!test_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags)) {
		/* Find and register all query methods */
		acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
				    acpi_ec_register_query_methods,
				    NULL, ec, NULL);
		set_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags);
	}
	if (!test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) {
		status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
						      ACPI_GPE_EDGE_TRIGGERED,
						      &acpi_ec_gpe_handler, ec);
		/* This is not fatal as we can poll EC events */
		if (ACPI_SUCCESS(status)) {
			set_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags);
			acpi_ec_leave_noirq(ec);
			if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
			    ec->reference_count >= 1)
				acpi_ec_enable_gpe(ec, true);
		}
	}

	/* EC is fully operational, allow queries */
	acpi_ec_enable_event(ec);
	return 0;
}
/*
 * Tear down in the reverse order of ec_install_handlers(): the operation
 * region handler first, then the GPE handler, then the query handlers.
 */
static void ec_remove_handlers(struct acpi_ec *ec)
{
	if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
		if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
				ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
			pr_err("failed to remove space handler\n");
		clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
	}

	/*
	 * Stops handling the EC transactions after removing the operation
	 * region handler. This is required because _REG(DISCONNECT)
	 * invoked during the removal can result in new EC transactions.
	 *
	 * Flushes the EC requests and thus disables the GPE before
	 * removing the GPE handler. This is required by the current ACPICA
	 * GPE core. ACPICA GPE core will automatically disable a GPE when
	 * it is indicated but there is no way to handle it. So the drivers
	 * must disable the GPEs prior to removing the GPE handlers.
	 */
	acpi_ec_stop(ec, false);

	if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) {
		if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
				&acpi_ec_gpe_handler)))
			pr_err("failed to remove gpe handler\n");
		clear_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags);
	}
	if (test_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags)) {
		acpi_ec_remove_query_handlers(ec, true, 0);
		clear_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags);
	}
}
  1321. static int acpi_ec_setup(struct acpi_ec *ec, bool handle_events)
  1322. {
  1323. int ret;
  1324. ret = ec_install_handlers(ec, handle_events);
  1325. if (ret)
  1326. return ret;
  1327. /* First EC capable of handling transactions */
  1328. if (!first_ec) {
  1329. first_ec = ec;
  1330. acpi_handle_info(first_ec->handle, "Used as first EC\n");
  1331. }
  1332. acpi_handle_info(ec->handle,
  1333. "GPE=0x%x, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
  1334. ec->gpe, ec->command_addr, ec->data_addr);
  1335. return ret;
  1336. }
/*
 * (Re-)configure @ec as the boot EC bound to @handle. @handle_events
 * selects whether query-event handling is set up; @is_ecdt records
 * whether the description came from the ECDT or the DSDT.
 */
static int acpi_config_boot_ec(struct acpi_ec *ec, acpi_handle handle,
			       bool handle_events, bool is_ecdt)
{
	int ret;

	/*
	 * Changing the ACPI handle results in a re-configuration of the
	 * boot EC. And if it happens after the namespace initialization,
	 * it causes _REG evaluations.
	 */
	if (boot_ec && boot_ec->handle != handle)
		ec_remove_handlers(boot_ec);

	/* Unset old boot EC */
	if (boot_ec != ec)
		acpi_ec_free(boot_ec);

	/*
	 * ECDT device creation is split into acpi_ec_ecdt_probe() and
	 * acpi_ec_ecdt_start(). This function takes care of completing the
	 * ECDT parsing logic as the handle update should be performed
	 * between the installation/uninstallation of the handlers.
	 */
	if (ec->handle != handle)
		ec->handle = handle;

	ret = acpi_ec_setup(ec, handle_events);
	if (ret)
		return ret;

	/* Set new boot EC */
	if (!boot_ec) {
		boot_ec = ec;
		boot_ec_is_ecdt = is_ecdt;
	}

	acpi_handle_info(boot_ec->handle,
			 "Used as boot %s EC to handle transactions%s\n",
			 is_ecdt ? "ECDT" : "DSDT",
			 handle_events ? " and events" : "");
	return ret;
}
  1373. static bool acpi_ec_ecdt_get_handle(acpi_handle *phandle)
  1374. {
  1375. struct acpi_table_ecdt *ecdt_ptr;
  1376. acpi_status status;
  1377. acpi_handle handle;
  1378. status = acpi_get_table(ACPI_SIG_ECDT, 1,
  1379. (struct acpi_table_header **)&ecdt_ptr);
  1380. if (ACPI_FAILURE(status))
  1381. return false;
  1382. status = acpi_get_handle(NULL, ecdt_ptr->id, &handle);
  1383. if (ACPI_FAILURE(status))
  1384. return false;
  1385. *phandle = handle;
  1386. return true;
  1387. }
  1388. static bool acpi_is_boot_ec(struct acpi_ec *ec)
  1389. {
  1390. if (!boot_ec)
  1391. return false;
  1392. if (ec->handle == boot_ec->handle &&
  1393. ec->gpe == boot_ec->gpe &&
  1394. ec->command_addr == boot_ec->command_addr &&
  1395. ec->data_addr == boot_ec->data_addr)
  1396. return true;
  1397. return false;
  1398. }
/*
 * ACPI bus .add() callback: parse the EC described by @device, reuse the
 * boot EC when it is the same hardware, install handlers, reserve the
 * I/O ports, and re-probe devices that declared a dependency on the EC.
 */
static int acpi_ec_add(struct acpi_device *device)
{
	struct acpi_ec *ec = NULL;
	int ret;

	strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_EC_CLASS);

	ec = acpi_ec_alloc();
	if (!ec)
		return -ENOMEM;
	if (ec_parse_device(device->handle, 0, ec, NULL) !=
	    AE_CTRL_TERMINATE) {
		ret = -EINVAL;
		goto err_alloc;
	}

	if (acpi_is_boot_ec(ec)) {
		/* Same hardware as the boot EC: free the copy, reuse it */
		boot_ec_is_ecdt = false;
		acpi_handle_debug(ec->handle, "duplicated.\n");
		acpi_ec_free(ec);
		ec = boot_ec;
		ret = acpi_config_boot_ec(ec, ec->handle, true, false);
	} else
		ret = acpi_ec_setup(ec, true);
	if (ret)
		goto err_query;

	device->driver_data = ec;

	/* Port reservation failures are reported via WARN but not fatal */
	ret = !!request_region(ec->data_addr, 1, "EC data");
	WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr);
	ret = !!request_region(ec->command_addr, 1, "EC cmd");
	WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);

	/* Reprobe devices depending on the EC */
	acpi_walk_dep_device_list(ec->handle);
	acpi_handle_debug(ec->handle, "enumerated.\n");
	return 0;

err_query:
	if (ec != boot_ec)
		acpi_ec_remove_query_handlers(ec, true, 0);
err_alloc:
	if (ec != boot_ec)
		acpi_ec_free(ec);
	return ret;
}
/*
 * ACPI bus .remove() callback: release the reserved I/O ports and, unless
 * this is the boot EC (which must stay functional), tear the EC down.
 */
static int acpi_ec_remove(struct acpi_device *device)
{
	struct acpi_ec *ec;

	if (!device)
		return -EINVAL;

	ec = acpi_driver_data(device);
	release_region(ec->data_addr, 1);
	release_region(ec->command_addr, 1);
	device->driver_data = NULL;
	if (ec != boot_ec) {
		ec_remove_handlers(ec);
		acpi_ec_free(ec);
	}
	return 0;
}
  1455. static acpi_status
  1456. ec_parse_io_ports(struct acpi_resource *resource, void *context)
  1457. {
  1458. struct acpi_ec *ec = context;
  1459. if (resource->type != ACPI_RESOURCE_TYPE_IO)
  1460. return AE_OK;
  1461. /*
  1462. * The first address region returned is the data port, and
  1463. * the second address region returned is the status/command
  1464. * port.
  1465. */
  1466. if (ec->data_addr == 0)
  1467. ec->data_addr = resource->data.io.minimum;
  1468. else if (ec->command_addr == 0)
  1469. ec->command_addr = resource->data.io.minimum;
  1470. else
  1471. return AE_CTRL_TERMINATE;
  1472. return AE_OK;
  1473. }
/* ACPI IDs claimed by this driver; the empty entry terminates the table. */
static const struct acpi_device_id ec_device_ids[] = {
	{"PNP0C09", 0},
	{"", 0},
};
/*
 * Locate the DSDT-declared EC after namespace initialization and
 * re-configure the boot EC around it so that _REG gets evaluated.
 * Events are not handled yet because the GPE is not fully initialized.
 */
int __init acpi_ec_dsdt_probe(void)
{
	acpi_status status;
	struct acpi_ec *ec;
	int ret;

	ec = acpi_ec_alloc();
	if (!ec)
		return -ENOMEM;
	/*
	 * At this point, the namespace is initialized, so start to find
	 * the namespace objects.
	 */
	status = acpi_get_devices(ec_device_ids[0].id,
				  ec_parse_device, ec, NULL);
	if (ACPI_FAILURE(status) || !ec->handle) {
		ret = -ENODEV;
		goto error;
	}
	/*
	 * When the DSDT EC is available, always re-configure boot EC to
	 * have _REG evaluated. _REG can only be evaluated after the
	 * namespace initialization.
	 * At this point, the GPE is not fully initialized, so do not to
	 * handle the events.
	 */
	ret = acpi_config_boot_ec(ec, ec->handle, false, false);
error:
	if (ret)
		acpi_ec_free(ec);
	return ret;
}
/*
 * If the DSDT EC is not functioning, we still need to prepare a fully
 * functioning ECDT EC first in order to handle the events.
 * https://bugzilla.kernel.org/show_bug.cgi?id=115021
 */
static int __init acpi_ec_ecdt_start(void)
{
	acpi_handle handle;

	if (!boot_ec)
		return -ENODEV;
	/*
	 * The DSDT EC should have already been started in
	 * acpi_ec_add().
	 */
	if (!boot_ec_is_ecdt)
		return -ENODEV;

	/*
	 * At this point, the namespace and the GPE is initialized, so
	 * start to find the namespace objects and handle the events.
	 */
	if (!acpi_ec_ecdt_get_handle(&handle))
		return -ENODEV;
	/* Bind the ECDT boot EC to its namespace node and enable events */
	return acpi_config_boot_ec(boot_ec, handle, true, true);
}
#if 0
/*
 * Some EC firmware variations refuses to respond QR_EC when SCI_EVT is not
 * set, for which case, we complete the QR_EC without issuing it to the
 * firmware.
 * https://bugzilla.kernel.org/show_bug.cgi?id=82611
 * https://bugzilla.kernel.org/show_bug.cgi?id=97381
 */
/* DMI callback (currently compiled out): enable the QR_EC handshake quirk. */
static int ec_flag_query_handshake(const struct dmi_system_id *id)
{
	pr_debug("Detected the EC firmware requiring QR_EC issued when SCI_EVT set\n");
	EC_FLAGS_QUERY_HANDSHAKE = 1;
	return 0;
}
#endif
  1548. /*
  1549. * On some hardware it is necessary to clear events accumulated by the EC during
  1550. * sleep. These ECs stop reporting GPEs until they are manually polled, if too
  1551. * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
  1552. *
  1553. * https://bugzilla.kernel.org/show_bug.cgi?id=44161
  1554. *
  1555. * Ideally, the EC should also be instructed NOT to accumulate events during
  1556. * sleep (which Windows seems to do somehow), but the interface to control this
  1557. * behaviour is not known at this time.
  1558. *
  1559. * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
  1560. * however it is very likely that other Samsung models are affected.
  1561. *
  1562. * On systems which don't accumulate _Q events during sleep, this extra check
  1563. * should be harmless.
  1564. */
  1565. static int ec_clear_on_resume(const struct dmi_system_id *id)
  1566. {
  1567. pr_debug("Detected system needing EC poll on resume.\n");
  1568. EC_FLAGS_CLEAR_ON_RESUME = 1;
  1569. ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
  1570. return 0;
  1571. }
  1572. /*
  1573. * Some ECDTs contain wrong register addresses.
  1574. * MSI MS-171F
  1575. * https://bugzilla.kernel.org/show_bug.cgi?id=12461
  1576. */
  1577. static int ec_correct_ecdt(const struct dmi_system_id *id)
  1578. {
  1579. pr_debug("Detected system needing ECDT address correction.\n");
  1580. EC_FLAGS_CORRECT_ECDT = 1;
  1581. return 0;
  1582. }
  1583. static struct dmi_system_id ec_dmi_table[] __initdata = {
  1584. {
  1585. ec_correct_ecdt, "MSI MS-171F", {
  1586. DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
  1587. DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL},
  1588. {
  1589. ec_clear_on_resume, "Samsung hardware", {
  1590. DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
  1591. {},
  1592. };
  1593. int __init acpi_ec_ecdt_probe(void)
  1594. {
  1595. int ret;
  1596. acpi_status status;
  1597. struct acpi_table_ecdt *ecdt_ptr;
  1598. struct acpi_ec *ec;
  1599. ec = acpi_ec_alloc();
  1600. if (!ec)
  1601. return -ENOMEM;
  1602. /*
  1603. * Generate a boot ec context
  1604. */
  1605. dmi_check_system(ec_dmi_table);
  1606. status = acpi_get_table(ACPI_SIG_ECDT, 1,
  1607. (struct acpi_table_header **)&ecdt_ptr);
  1608. if (ACPI_FAILURE(status)) {
  1609. ret = -ENODEV;
  1610. goto error;
  1611. }
  1612. if (!ecdt_ptr->control.address || !ecdt_ptr->data.address) {
  1613. /*
  1614. * Asus X50GL:
  1615. * https://bugzilla.kernel.org/show_bug.cgi?id=11880
  1616. */
  1617. ret = -ENODEV;
  1618. goto error;
  1619. }
  1620. if (EC_FLAGS_CORRECT_ECDT) {
  1621. ec->command_addr = ecdt_ptr->data.address;
  1622. ec->data_addr = ecdt_ptr->control.address;
  1623. } else {
  1624. ec->command_addr = ecdt_ptr->control.address;
  1625. ec->data_addr = ecdt_ptr->data.address;
  1626. }
  1627. ec->gpe = ecdt_ptr->gpe;
  1628. /*
  1629. * At this point, the namespace is not initialized, so do not find
  1630. * the namespace objects, or handle the events.
  1631. */
  1632. ret = acpi_config_boot_ec(ec, ACPI_ROOT_OBJECT, false, true);
  1633. error:
  1634. if (ret)
  1635. acpi_ec_free(ec);
  1636. return ret;
  1637. }
  1638. #ifdef CONFIG_PM_SLEEP
  1639. static int acpi_ec_suspend(struct device *dev)
  1640. {
  1641. struct acpi_ec *ec =
  1642. acpi_driver_data(to_acpi_device(dev));
  1643. if (ec_freeze_events)
  1644. acpi_ec_disable_event(ec);
  1645. return 0;
  1646. }
  1647. static int acpi_ec_resume(struct device *dev)
  1648. {
  1649. struct acpi_ec *ec =
  1650. acpi_driver_data(to_acpi_device(dev));
  1651. acpi_ec_enable_event(ec);
  1652. return 0;
  1653. }
  1654. #endif
/* System sleep PM callbacks wired into the driver below via .drv.pm. */
static const struct dev_pm_ops acpi_ec_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume)
};
  1658. static int param_set_event_clearing(const char *val, struct kernel_param *kp)
  1659. {
  1660. int result = 0;
  1661. if (!strncmp(val, "status", sizeof("status") - 1)) {
  1662. ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
  1663. pr_info("Assuming SCI_EVT clearing on EC_SC accesses\n");
  1664. } else if (!strncmp(val, "query", sizeof("query") - 1)) {
  1665. ec_event_clearing = ACPI_EC_EVT_TIMING_QUERY;
  1666. pr_info("Assuming SCI_EVT clearing on QR_EC writes\n");
  1667. } else if (!strncmp(val, "event", sizeof("event") - 1)) {
  1668. ec_event_clearing = ACPI_EC_EVT_TIMING_EVENT;
  1669. pr_info("Assuming SCI_EVT clearing on event reads\n");
  1670. } else
  1671. result = -EINVAL;
  1672. return result;
  1673. }
  1674. static int param_get_event_clearing(char *buffer, struct kernel_param *kp)
  1675. {
  1676. switch (ec_event_clearing) {
  1677. case ACPI_EC_EVT_TIMING_STATUS:
  1678. return sprintf(buffer, "status");
  1679. case ACPI_EC_EVT_TIMING_QUERY:
  1680. return sprintf(buffer, "query");
  1681. case ACPI_EC_EVT_TIMING_EVENT:
  1682. return sprintf(buffer, "event");
  1683. default:
  1684. return sprintf(buffer, "invalid");
  1685. }
  1686. return 0;
  1687. }
/* Expose the SCI_EVT clearing-timing policy as a writable module parameter. */
module_param_call(ec_event_clearing, param_set_event_clearing, param_get_event_clearing,
		  NULL, 0644);
MODULE_PARM_DESC(ec_event_clearing, "Assumed SCI_EVT clearing timing");
/* ACPI driver binding for DSDT-declared EC devices (ids from ec_device_ids). */
static struct acpi_driver acpi_ec_driver = {
	.name = "ec",
	.class = ACPI_EC_CLASS,
	.ids = ec_device_ids,
	.ops = {
		.add = acpi_ec_add,
		.remove = acpi_ec_remove,
	},
	.drv.pm = &acpi_ec_pm,	/* system sleep callbacks defined above */
};
  1701. static inline int acpi_ec_query_init(void)
  1702. {
  1703. if (!ec_query_wq) {
  1704. ec_query_wq = alloc_workqueue("kec_query", 0,
  1705. ec_max_queries);
  1706. if (!ec_query_wq)
  1707. return -ENODEV;
  1708. }
  1709. return 0;
  1710. }
  1711. static inline void acpi_ec_query_exit(void)
  1712. {
  1713. if (ec_query_wq) {
  1714. destroy_workqueue(ec_query_wq);
  1715. ec_query_wq = NULL;
  1716. }
  1717. }
  1718. int __init acpi_ec_init(void)
  1719. {
  1720. int result;
  1721. int ecdt_fail, dsdt_fail;
  1722. /* register workqueue for _Qxx evaluations */
  1723. result = acpi_ec_query_init();
  1724. if (result)
  1725. return result;
  1726. /* Drivers must be started after acpi_ec_query_init() */
  1727. ecdt_fail = acpi_ec_ecdt_start();
  1728. dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
  1729. return ecdt_fail && dsdt_fail ? -ENODEV : 0;
  1730. }
/* EC driver currently not unloadable */
#if 0
/*
 * Compiled out: would unregister the EC driver and destroy the query
 * workqueue, in the reverse order of acpi_ec_init().
 */
static void __exit acpi_ec_exit(void)
{
	acpi_bus_unregister_driver(&acpi_ec_driver);
	acpi_ec_query_exit();
}
#endif	/* 0 */