/* hw-me.c — Intel MEI ME hardware access layer */
  1. /*
  2. *
  3. * Intel Management Engine Interface (Intel MEI) Linux driver
  4. * Copyright (c) 2003-2012, Intel Corporation.
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms and conditions of the GNU General Public License,
  8. * version 2, as published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope it will be useful, but WITHOUT
  11. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  13. * more details.
  14. *
  15. */
  16. #include <linux/pci.h>
  17. #include <linux/kthread.h>
  18. #include <linux/interrupt.h>
  19. #include <linux/pm_runtime.h>
  20. #include "mei_dev.h"
  21. #include "hbm.h"
  22. #include "hw-me.h"
  23. #include "hw-me-regs.h"
  24. #include "mei-trace.h"
  25. /**
  26. * mei_me_reg_read - Reads 32bit data from the mei device
  27. *
  28. * @hw: the me hardware structure
  29. * @offset: offset from which to read the data
  30. *
  31. * Return: register value (u32)
  32. */
  33. static inline u32 mei_me_reg_read(const struct mei_me_hw *hw,
  34. unsigned long offset)
  35. {
  36. return ioread32(hw->mem_addr + offset);
  37. }
  38. /**
  39. * mei_me_reg_write - Writes 32bit data to the mei device
  40. *
  41. * @hw: the me hardware structure
  42. * @offset: offset from which to write the data
  43. * @value: register value to write (u32)
  44. */
  45. static inline void mei_me_reg_write(const struct mei_me_hw *hw,
  46. unsigned long offset, u32 value)
  47. {
  48. iowrite32(value, hw->mem_addr + offset);
  49. }
  50. /**
  51. * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer
  52. * read window register
  53. *
  54. * @dev: the device structure
  55. *
  56. * Return: ME_CB_RW register value (u32)
  57. */
  58. static inline u32 mei_me_mecbrw_read(const struct mei_device *dev)
  59. {
  60. return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
  61. }
  62. /**
  63. * mei_me_hcbww_write - write 32bit data to the host circular buffer
  64. *
  65. * @dev: the device structure
  66. * @data: 32bit data to be written to the host circular buffer
  67. */
  68. static inline void mei_me_hcbww_write(struct mei_device *dev, u32 data)
  69. {
  70. mei_me_reg_write(to_me_hw(dev), H_CB_WW, data);
  71. }
  72. /**
  73. * mei_me_mecsr_read - Reads 32bit data from the ME CSR
  74. *
  75. * @dev: the device structure
  76. *
  77. * Return: ME_CSR_HA register value (u32)
  78. */
  79. static inline u32 mei_me_mecsr_read(const struct mei_device *dev)
  80. {
  81. u32 reg;
  82. reg = mei_me_reg_read(to_me_hw(dev), ME_CSR_HA);
  83. trace_mei_reg_read(dev->dev, "ME_CSR_HA", ME_CSR_HA, reg);
  84. return reg;
  85. }
  86. /**
  87. * mei_hcsr_read - Reads 32bit data from the host CSR
  88. *
  89. * @dev: the device structure
  90. *
  91. * Return: H_CSR register value (u32)
  92. */
  93. static inline u32 mei_hcsr_read(const struct mei_device *dev)
  94. {
  95. u32 reg;
  96. reg = mei_me_reg_read(to_me_hw(dev), H_CSR);
  97. trace_mei_reg_read(dev->dev, "H_CSR", H_CSR, reg);
  98. return reg;
  99. }
  100. /**
  101. * mei_hcsr_write - writes H_CSR register to the mei device
  102. *
  103. * @dev: the device structure
  104. * @reg: new register value
  105. */
  106. static inline void mei_hcsr_write(struct mei_device *dev, u32 reg)
  107. {
  108. trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg);
  109. mei_me_reg_write(to_me_hw(dev), H_CSR, reg);
  110. }
  111. /**
  112. * mei_hcsr_set - writes H_CSR register to the mei device,
  113. * and ignores the H_IS bit for it is write-one-to-zero.
  114. *
  115. * @dev: the device structure
  116. * @reg: new register value
  117. */
  118. static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
  119. {
  120. reg &= ~H_CSR_IS_MASK;
  121. mei_hcsr_write(dev, reg);
  122. }
  123. /**
  124. * mei_me_d0i3c_read - Reads 32bit data from the D0I3C register
  125. *
  126. * @dev: the device structure
  127. *
  128. * Return: H_D0I3C register value (u32)
  129. */
  130. static inline u32 mei_me_d0i3c_read(const struct mei_device *dev)
  131. {
  132. u32 reg;
  133. reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C);
  134. trace_mei_reg_read(dev->dev, "H_D0I3C", H_D0I3C, reg);
  135. return reg;
  136. }
  137. /**
  138. * mei_me_d0i3c_write - writes H_D0I3C register to device
  139. *
  140. * @dev: the device structure
  141. * @reg: new register value
  142. */
  143. static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
  144. {
  145. trace_mei_reg_write(dev->dev, "H_D0I3C", H_D0I3C, reg);
  146. mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg);
  147. }
  148. /**
  149. * mei_me_fw_status - read fw status register from pci config space
  150. *
  151. * @dev: mei device
  152. * @fw_status: fw status register values
  153. *
  154. * Return: 0 on success, error otherwise
  155. */
  156. static int mei_me_fw_status(struct mei_device *dev,
  157. struct mei_fw_status *fw_status)
  158. {
  159. struct pci_dev *pdev = to_pci_dev(dev->dev);
  160. struct mei_me_hw *hw = to_me_hw(dev);
  161. const struct mei_fw_status *fw_src = &hw->cfg->fw_status;
  162. int ret;
  163. int i;
  164. if (!fw_status)
  165. return -EINVAL;
  166. fw_status->count = fw_src->count;
  167. for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
  168. ret = pci_read_config_dword(pdev, fw_src->status[i],
  169. &fw_status->status[i]);
  170. trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X",
  171. fw_src->status[i],
  172. fw_status->status[i]);
  173. if (ret)
  174. return ret;
  175. }
  176. return 0;
  177. }
  178. /**
  179. * mei_me_hw_config - configure hw dependent settings
  180. *
  181. * @dev: mei device
  182. */
  183. static void mei_me_hw_config(struct mei_device *dev)
  184. {
  185. struct pci_dev *pdev = to_pci_dev(dev->dev);
  186. struct mei_me_hw *hw = to_me_hw(dev);
  187. u32 hcsr, reg;
  188. /* Doesn't change in runtime */
  189. hcsr = mei_hcsr_read(dev);
  190. dev->hbuf_depth = (hcsr & H_CBD) >> 24;
  191. reg = 0;
  192. pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
  193. trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
  194. hw->d0i3_supported =
  195. ((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);
  196. hw->pg_state = MEI_PG_OFF;
  197. if (hw->d0i3_supported) {
  198. reg = mei_me_d0i3c_read(dev);
  199. if (reg & H_D0I3C_I3)
  200. hw->pg_state = MEI_PG_ON;
  201. }
  202. }
  203. /**
  204. * mei_me_pg_state - translate internal pg state
  205. * to the mei power gating state
  206. *
  207. * @dev: mei device
  208. *
  209. * Return: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise
  210. */
  211. static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
  212. {
  213. struct mei_me_hw *hw = to_me_hw(dev);
  214. return hw->pg_state;
  215. }
  216. /**
  217. * mei_me_intr_clear - clear and stop interrupts
  218. *
  219. * @dev: the device structure
  220. */
  221. static void mei_me_intr_clear(struct mei_device *dev)
  222. {
  223. u32 hcsr = mei_hcsr_read(dev);
  224. if (hcsr & H_CSR_IS_MASK)
  225. mei_hcsr_write(dev, hcsr);
  226. }
  227. /**
  228. * mei_me_intr_enable - enables mei device interrupts
  229. *
  230. * @dev: the device structure
  231. */
  232. static void mei_me_intr_enable(struct mei_device *dev)
  233. {
  234. u32 hcsr = mei_hcsr_read(dev);
  235. hcsr |= H_CSR_IE_MASK;
  236. mei_hcsr_set(dev, hcsr);
  237. }
  238. /**
  239. * mei_me_intr_disable - disables mei device interrupts
  240. *
  241. * @dev: the device structure
  242. */
  243. static void mei_me_intr_disable(struct mei_device *dev)
  244. {
  245. u32 hcsr = mei_hcsr_read(dev);
  246. hcsr &= ~H_CSR_IE_MASK;
  247. mei_hcsr_set(dev, hcsr);
  248. }
  249. /**
  250. * mei_me_hw_reset_release - release device from the reset
  251. *
  252. * @dev: the device structure
  253. */
  254. static void mei_me_hw_reset_release(struct mei_device *dev)
  255. {
  256. u32 hcsr = mei_hcsr_read(dev);
  257. hcsr |= H_IG;
  258. hcsr &= ~H_RST;
  259. mei_hcsr_set(dev, hcsr);
  260. /* complete this write before we set host ready on another CPU */
  261. mmiowb();
  262. }
  263. /**
  264. * mei_me_host_set_ready - enable device
  265. *
  266. * @dev: mei device
  267. */
  268. static void mei_me_host_set_ready(struct mei_device *dev)
  269. {
  270. u32 hcsr = mei_hcsr_read(dev);
  271. hcsr |= H_CSR_IE_MASK | H_IG | H_RDY;
  272. mei_hcsr_set(dev, hcsr);
  273. }
  274. /**
  275. * mei_me_host_is_ready - check whether the host has turned ready
  276. *
  277. * @dev: mei device
  278. * Return: bool
  279. */
  280. static bool mei_me_host_is_ready(struct mei_device *dev)
  281. {
  282. u32 hcsr = mei_hcsr_read(dev);
  283. return (hcsr & H_RDY) == H_RDY;
  284. }
  285. /**
  286. * mei_me_hw_is_ready - check whether the me(hw) has turned ready
  287. *
  288. * @dev: mei device
  289. * Return: bool
  290. */
  291. static bool mei_me_hw_is_ready(struct mei_device *dev)
  292. {
  293. u32 mecsr = mei_me_mecsr_read(dev);
  294. return (mecsr & ME_RDY_HRA) == ME_RDY_HRA;
  295. }
  296. /**
  297. * mei_me_hw_ready_wait - wait until the me(hw) has turned ready
  298. * or timeout is reached
  299. *
  300. * @dev: mei device
  301. * Return: 0 on success, error otherwise
  302. */
  303. static int mei_me_hw_ready_wait(struct mei_device *dev)
  304. {
  305. mutex_unlock(&dev->device_lock);
  306. wait_event_timeout(dev->wait_hw_ready,
  307. dev->recvd_hw_ready,
  308. mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
  309. mutex_lock(&dev->device_lock);
  310. if (!dev->recvd_hw_ready) {
  311. dev_err(dev->dev, "wait hw ready failed\n");
  312. return -ETIME;
  313. }
  314. mei_me_hw_reset_release(dev);
  315. dev->recvd_hw_ready = false;
  316. return 0;
  317. }
  318. /**
  319. * mei_me_hw_start - hw start routine
  320. *
  321. * @dev: mei device
  322. * Return: 0 on success, error otherwise
  323. */
  324. static int mei_me_hw_start(struct mei_device *dev)
  325. {
  326. int ret = mei_me_hw_ready_wait(dev);
  327. if (ret)
  328. return ret;
  329. dev_dbg(dev->dev, "hw is ready\n");
  330. mei_me_host_set_ready(dev);
  331. return ret;
  332. }
  333. /**
  334. * mei_hbuf_filled_slots - gets number of device filled buffer slots
  335. *
  336. * @dev: the device structure
  337. *
  338. * Return: number of filled slots
  339. */
  340. static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
  341. {
  342. u32 hcsr;
  343. char read_ptr, write_ptr;
  344. hcsr = mei_hcsr_read(dev);
  345. read_ptr = (char) ((hcsr & H_CBRP) >> 8);
  346. write_ptr = (char) ((hcsr & H_CBWP) >> 16);
  347. return (unsigned char) (write_ptr - read_ptr);
  348. }
  349. /**
  350. * mei_me_hbuf_is_empty - checks if host buffer is empty.
  351. *
  352. * @dev: the device structure
  353. *
  354. * Return: true if empty, false - otherwise.
  355. */
  356. static bool mei_me_hbuf_is_empty(struct mei_device *dev)
  357. {
  358. return mei_hbuf_filled_slots(dev) == 0;
  359. }
  360. /**
  361. * mei_me_hbuf_empty_slots - counts write empty slots.
  362. *
  363. * @dev: the device structure
  364. *
  365. * Return: -EOVERFLOW if overflow, otherwise empty slots count
  366. */
  367. static int mei_me_hbuf_empty_slots(struct mei_device *dev)
  368. {
  369. unsigned char filled_slots, empty_slots;
  370. filled_slots = mei_hbuf_filled_slots(dev);
  371. empty_slots = dev->hbuf_depth - filled_slots;
  372. /* check for overflow */
  373. if (filled_slots > dev->hbuf_depth)
  374. return -EOVERFLOW;
  375. return empty_slots;
  376. }
  377. /**
  378. * mei_me_hbuf_max_len - returns size of hw buffer.
  379. *
  380. * @dev: the device structure
  381. *
  382. * Return: size of hw buffer in bytes
  383. */
  384. static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
  385. {
  386. return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
  387. }
  388. /**
  389. * mei_me_write_message - writes a message to mei device.
  390. *
  391. * @dev: the device structure
  392. * @header: mei HECI header of message
  393. * @buf: message payload will be written
  394. *
  395. * Return: -EIO if write has failed
  396. */
  397. static int mei_me_write_message(struct mei_device *dev,
  398. struct mei_msg_hdr *header,
  399. unsigned char *buf)
  400. {
  401. unsigned long rem;
  402. unsigned long length = header->length;
  403. u32 *reg_buf = (u32 *)buf;
  404. u32 hcsr;
  405. u32 dw_cnt;
  406. int i;
  407. int empty_slots;
  408. dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));
  409. empty_slots = mei_hbuf_empty_slots(dev);
  410. dev_dbg(dev->dev, "empty slots = %hu.\n", empty_slots);
  411. dw_cnt = mei_data2slots(length);
  412. if (empty_slots < 0 || dw_cnt > empty_slots)
  413. return -EMSGSIZE;
  414. mei_me_hcbww_write(dev, *((u32 *) header));
  415. for (i = 0; i < length / 4; i++)
  416. mei_me_hcbww_write(dev, reg_buf[i]);
  417. rem = length & 0x3;
  418. if (rem > 0) {
  419. u32 reg = 0;
  420. memcpy(&reg, &buf[length - rem], rem);
  421. mei_me_hcbww_write(dev, reg);
  422. }
  423. hcsr = mei_hcsr_read(dev) | H_IG;
  424. mei_hcsr_set(dev, hcsr);
  425. if (!mei_me_hw_is_ready(dev))
  426. return -EIO;
  427. return 0;
  428. }
  429. /**
  430. * mei_me_count_full_read_slots - counts read full slots.
  431. *
  432. * @dev: the device structure
  433. *
  434. * Return: -EOVERFLOW if overflow, otherwise filled slots count
  435. */
  436. static int mei_me_count_full_read_slots(struct mei_device *dev)
  437. {
  438. u32 me_csr;
  439. char read_ptr, write_ptr;
  440. unsigned char buffer_depth, filled_slots;
  441. me_csr = mei_me_mecsr_read(dev);
  442. buffer_depth = (unsigned char)((me_csr & ME_CBD_HRA) >> 24);
  443. read_ptr = (char) ((me_csr & ME_CBRP_HRA) >> 8);
  444. write_ptr = (char) ((me_csr & ME_CBWP_HRA) >> 16);
  445. filled_slots = (unsigned char) (write_ptr - read_ptr);
  446. /* check for overflow */
  447. if (filled_slots > buffer_depth)
  448. return -EOVERFLOW;
  449. dev_dbg(dev->dev, "filled_slots =%08x\n", filled_slots);
  450. return (int)filled_slots;
  451. }
  452. /**
  453. * mei_me_read_slots - reads a message from mei device.
  454. *
  455. * @dev: the device structure
  456. * @buffer: message buffer will be written
  457. * @buffer_length: message size will be read
  458. *
  459. * Return: always 0
  460. */
  461. static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
  462. unsigned long buffer_length)
  463. {
  464. u32 *reg_buf = (u32 *)buffer;
  465. u32 hcsr;
  466. for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
  467. *reg_buf++ = mei_me_mecbrw_read(dev);
  468. if (buffer_length > 0) {
  469. u32 reg = mei_me_mecbrw_read(dev);
  470. memcpy(reg_buf, &reg, buffer_length);
  471. }
  472. hcsr = mei_hcsr_read(dev) | H_IG;
  473. mei_hcsr_set(dev, hcsr);
  474. return 0;
  475. }
  476. /**
  477. * mei_me_pg_set - write pg enter register
  478. *
  479. * @dev: the device structure
  480. */
  481. static void mei_me_pg_set(struct mei_device *dev)
  482. {
  483. struct mei_me_hw *hw = to_me_hw(dev);
  484. u32 reg;
  485. reg = mei_me_reg_read(hw, H_HPG_CSR);
  486. trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
  487. reg |= H_HPG_CSR_PGI;
  488. trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
  489. mei_me_reg_write(hw, H_HPG_CSR, reg);
  490. }
  491. /**
  492. * mei_me_pg_unset - write pg exit register
  493. *
  494. * @dev: the device structure
  495. */
  496. static void mei_me_pg_unset(struct mei_device *dev)
  497. {
  498. struct mei_me_hw *hw = to_me_hw(dev);
  499. u32 reg;
  500. reg = mei_me_reg_read(hw, H_HPG_CSR);
  501. trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
  502. WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");
  503. reg |= H_HPG_CSR_PGIHEXR;
  504. trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
  505. mei_me_reg_write(hw, H_HPG_CSR, reg);
  506. }
  507. /**
  508. * mei_me_pg_legacy_enter_sync - perform legacy pg entry procedure
  509. *
  510. * @dev: the device structure
  511. *
  512. * Return: 0 on success an error code otherwise
  513. */
  514. static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
  515. {
  516. struct mei_me_hw *hw = to_me_hw(dev);
  517. unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
  518. int ret;
  519. dev->pg_event = MEI_PG_EVENT_WAIT;
  520. ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
  521. if (ret)
  522. return ret;
  523. mutex_unlock(&dev->device_lock);
  524. wait_event_timeout(dev->wait_pg,
  525. dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
  526. mutex_lock(&dev->device_lock);
  527. if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
  528. mei_me_pg_set(dev);
  529. ret = 0;
  530. } else {
  531. ret = -ETIME;
  532. }
  533. dev->pg_event = MEI_PG_EVENT_IDLE;
  534. hw->pg_state = MEI_PG_ON;
  535. return ret;
  536. }
  537. /**
  538. * mei_me_pg_legacy_exit_sync - perform legacy pg exit procedure
  539. *
  540. * @dev: the device structure
  541. *
  542. * Return: 0 on success an error code otherwise
  543. */
  544. static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
  545. {
  546. struct mei_me_hw *hw = to_me_hw(dev);
  547. unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
  548. int ret;
  549. if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
  550. goto reply;
  551. dev->pg_event = MEI_PG_EVENT_WAIT;
  552. mei_me_pg_unset(dev);
  553. mutex_unlock(&dev->device_lock);
  554. wait_event_timeout(dev->wait_pg,
  555. dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
  556. mutex_lock(&dev->device_lock);
  557. reply:
  558. if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
  559. ret = -ETIME;
  560. goto out;
  561. }
  562. dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
  563. ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
  564. if (ret)
  565. return ret;
  566. mutex_unlock(&dev->device_lock);
  567. wait_event_timeout(dev->wait_pg,
  568. dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
  569. mutex_lock(&dev->device_lock);
  570. if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
  571. ret = 0;
  572. else
  573. ret = -ETIME;
  574. out:
  575. dev->pg_event = MEI_PG_EVENT_IDLE;
  576. hw->pg_state = MEI_PG_OFF;
  577. return ret;
  578. }
  579. /**
  580. * mei_me_pg_in_transition - is device now in pg transition
  581. *
  582. * @dev: the device structure
  583. *
  584. * Return: true if in pg transition, false otherwise
  585. */
  586. static bool mei_me_pg_in_transition(struct mei_device *dev)
  587. {
  588. return dev->pg_event >= MEI_PG_EVENT_WAIT &&
  589. dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
  590. }
  591. /**
  592. * mei_me_pg_is_enabled - detect if PG is supported by HW
  593. *
  594. * @dev: the device structure
  595. *
  596. * Return: true is pg supported, false otherwise
  597. */
  598. static bool mei_me_pg_is_enabled(struct mei_device *dev)
  599. {
  600. struct mei_me_hw *hw = to_me_hw(dev);
  601. u32 reg = mei_me_mecsr_read(dev);
  602. if (hw->d0i3_supported)
  603. return true;
  604. if ((reg & ME_PGIC_HRA) == 0)
  605. goto notsupported;
  606. if (!dev->hbm_f_pg_supported)
  607. goto notsupported;
  608. return true;
  609. notsupported:
  610. dev_dbg(dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n",
  611. hw->d0i3_supported,
  612. !!(reg & ME_PGIC_HRA),
  613. dev->version.major_version,
  614. dev->version.minor_version,
  615. HBM_MAJOR_VERSION_PGI,
  616. HBM_MINOR_VERSION_PGI);
  617. return false;
  618. }
  619. /**
  620. * mei_me_d0i3_set - write d0i3 register bit on mei device.
  621. *
  622. * @dev: the device structure
  623. * @intr: ask for interrupt
  624. *
  625. * Return: D0I3C register value
  626. */
  627. static u32 mei_me_d0i3_set(struct mei_device *dev, bool intr)
  628. {
  629. u32 reg = mei_me_d0i3c_read(dev);
  630. reg |= H_D0I3C_I3;
  631. if (intr)
  632. reg |= H_D0I3C_IR;
  633. else
  634. reg &= ~H_D0I3C_IR;
  635. mei_me_d0i3c_write(dev, reg);
  636. /* read it to ensure HW consistency */
  637. reg = mei_me_d0i3c_read(dev);
  638. return reg;
  639. }
  640. /**
  641. * mei_me_d0i3_unset - clean d0i3 register bit on mei device.
  642. *
  643. * @dev: the device structure
  644. *
  645. * Return: D0I3C register value
  646. */
  647. static u32 mei_me_d0i3_unset(struct mei_device *dev)
  648. {
  649. u32 reg = mei_me_d0i3c_read(dev);
  650. reg &= ~H_D0I3C_I3;
  651. reg |= H_D0I3C_IR;
  652. mei_me_d0i3c_write(dev, reg);
  653. /* read it to ensure HW consistency */
  654. reg = mei_me_d0i3c_read(dev);
  655. return reg;
  656. }
  657. /**
  658. * mei_me_d0i3_enter_sync - perform d0i3 entry procedure
  659. *
  660. * @dev: the device structure
  661. *
  662. * Return: 0 on success an error code otherwise
  663. */
  664. static int mei_me_d0i3_enter_sync(struct mei_device *dev)
  665. {
  666. struct mei_me_hw *hw = to_me_hw(dev);
  667. unsigned long d0i3_timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
  668. unsigned long pgi_timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
  669. int ret;
  670. u32 reg;
  671. reg = mei_me_d0i3c_read(dev);
  672. if (reg & H_D0I3C_I3) {
  673. /* we are in d0i3, nothing to do */
  674. dev_dbg(dev->dev, "d0i3 set not needed\n");
  675. ret = 0;
  676. goto on;
  677. }
  678. /* PGI entry procedure */
  679. dev->pg_event = MEI_PG_EVENT_WAIT;
  680. ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
  681. if (ret)
  682. /* FIXME: should we reset here? */
  683. goto out;
  684. mutex_unlock(&dev->device_lock);
  685. wait_event_timeout(dev->wait_pg,
  686. dev->pg_event == MEI_PG_EVENT_RECEIVED, pgi_timeout);
  687. mutex_lock(&dev->device_lock);
  688. if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
  689. ret = -ETIME;
  690. goto out;
  691. }
  692. /* end PGI entry procedure */
  693. dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
  694. reg = mei_me_d0i3_set(dev, true);
  695. if (!(reg & H_D0I3C_CIP)) {
  696. dev_dbg(dev->dev, "d0i3 enter wait not needed\n");
  697. ret = 0;
  698. goto on;
  699. }
  700. mutex_unlock(&dev->device_lock);
  701. wait_event_timeout(dev->wait_pg,
  702. dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, d0i3_timeout);
  703. mutex_lock(&dev->device_lock);
  704. if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
  705. reg = mei_me_d0i3c_read(dev);
  706. if (!(reg & H_D0I3C_I3)) {
  707. ret = -ETIME;
  708. goto out;
  709. }
  710. }
  711. ret = 0;
  712. on:
  713. hw->pg_state = MEI_PG_ON;
  714. out:
  715. dev->pg_event = MEI_PG_EVENT_IDLE;
  716. dev_dbg(dev->dev, "d0i3 enter ret = %d\n", ret);
  717. return ret;
  718. }
  719. /**
  720. * mei_me_d0i3_enter - perform d0i3 entry procedure
  721. * no hbm PG handshake
  722. * no waiting for confirmation; runs with interrupts
  723. * disabled
  724. *
  725. * @dev: the device structure
  726. *
  727. * Return: 0 on success an error code otherwise
  728. */
  729. static int mei_me_d0i3_enter(struct mei_device *dev)
  730. {
  731. struct mei_me_hw *hw = to_me_hw(dev);
  732. u32 reg;
  733. reg = mei_me_d0i3c_read(dev);
  734. if (reg & H_D0I3C_I3) {
  735. /* we are in d0i3, nothing to do */
  736. dev_dbg(dev->dev, "already d0i3 : set not needed\n");
  737. goto on;
  738. }
  739. mei_me_d0i3_set(dev, false);
  740. on:
  741. hw->pg_state = MEI_PG_ON;
  742. dev->pg_event = MEI_PG_EVENT_IDLE;
  743. dev_dbg(dev->dev, "d0i3 enter\n");
  744. return 0;
  745. }
  746. /**
  747. * mei_me_d0i3_exit_sync - perform d0i3 exit procedure
  748. *
  749. * @dev: the device structure
  750. *
  751. * Return: 0 on success an error code otherwise
  752. */
  753. static int mei_me_d0i3_exit_sync(struct mei_device *dev)
  754. {
  755. struct mei_me_hw *hw = to_me_hw(dev);
  756. unsigned long timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
  757. int ret;
  758. u32 reg;
  759. dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
  760. reg = mei_me_d0i3c_read(dev);
  761. if (!(reg & H_D0I3C_I3)) {
  762. /* we are not in d0i3, nothing to do */
  763. dev_dbg(dev->dev, "d0i3 exit not needed\n");
  764. ret = 0;
  765. goto off;
  766. }
  767. reg = mei_me_d0i3_unset(dev);
  768. if (!(reg & H_D0I3C_CIP)) {
  769. dev_dbg(dev->dev, "d0i3 exit wait not needed\n");
  770. ret = 0;
  771. goto off;
  772. }
  773. mutex_unlock(&dev->device_lock);
  774. wait_event_timeout(dev->wait_pg,
  775. dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
  776. mutex_lock(&dev->device_lock);
  777. if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
  778. reg = mei_me_d0i3c_read(dev);
  779. if (reg & H_D0I3C_I3) {
  780. ret = -ETIME;
  781. goto out;
  782. }
  783. }
  784. ret = 0;
  785. off:
  786. hw->pg_state = MEI_PG_OFF;
  787. out:
  788. dev->pg_event = MEI_PG_EVENT_IDLE;
  789. dev_dbg(dev->dev, "d0i3 exit ret = %d\n", ret);
  790. return ret;
  791. }
  792. /**
  793. * mei_me_pg_legacy_intr - perform legacy pg processing
  794. * in interrupt thread handler
  795. *
  796. * @dev: the device structure
  797. */
  798. static void mei_me_pg_legacy_intr(struct mei_device *dev)
  799. {
  800. struct mei_me_hw *hw = to_me_hw(dev);
  801. if (dev->pg_event != MEI_PG_EVENT_INTR_WAIT)
  802. return;
  803. dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
  804. hw->pg_state = MEI_PG_OFF;
  805. if (waitqueue_active(&dev->wait_pg))
  806. wake_up(&dev->wait_pg);
  807. }
  808. /**
  809. * mei_me_d0i3_intr - perform d0i3 processing in interrupt thread handler
  810. *
  811. * @dev: the device structure
  812. */
  813. static void mei_me_d0i3_intr(struct mei_device *dev)
  814. {
  815. struct mei_me_hw *hw = to_me_hw(dev);
  816. if (dev->pg_event == MEI_PG_EVENT_INTR_WAIT &&
  817. (hw->intr_source & H_D0I3C_IS)) {
  818. dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
  819. if (hw->pg_state == MEI_PG_ON) {
  820. hw->pg_state = MEI_PG_OFF;
  821. if (dev->hbm_state != MEI_HBM_IDLE) {
  822. /*
  823. * force H_RDY because it could be
  824. * wiped off during PG
  825. */
  826. dev_dbg(dev->dev, "d0i3 set host ready\n");
  827. mei_me_host_set_ready(dev);
  828. }
  829. } else {
  830. hw->pg_state = MEI_PG_ON;
  831. }
  832. wake_up(&dev->wait_pg);
  833. }
  834. if (hw->pg_state == MEI_PG_ON && (hw->intr_source & H_IS)) {
  835. /*
  836. * HW sent some data and we are in D0i3, so
  837. * we got here because of HW initiated exit from D0i3.
  838. * Start runtime pm resume sequence to exit low power state.
  839. */
  840. dev_dbg(dev->dev, "d0i3 want resume\n");
  841. mei_hbm_pg_resume(dev);
  842. }
  843. }
  844. /**
  845. * mei_me_pg_intr - perform pg processing in interrupt thread handler
  846. *
  847. * @dev: the device structure
  848. */
  849. static void mei_me_pg_intr(struct mei_device *dev)
  850. {
  851. struct mei_me_hw *hw = to_me_hw(dev);
  852. if (hw->d0i3_supported)
  853. mei_me_d0i3_intr(dev);
  854. else
  855. mei_me_pg_legacy_intr(dev);
  856. }
  857. /**
  858. * mei_me_pg_enter_sync - perform runtime pm entry procedure
  859. *
  860. * @dev: the device structure
  861. *
  862. * Return: 0 on success an error code otherwise
  863. */
  864. int mei_me_pg_enter_sync(struct mei_device *dev)
  865. {
  866. struct mei_me_hw *hw = to_me_hw(dev);
  867. if (hw->d0i3_supported)
  868. return mei_me_d0i3_enter_sync(dev);
  869. else
  870. return mei_me_pg_legacy_enter_sync(dev);
  871. }
  872. /**
  873. * mei_me_pg_exit_sync - perform runtime pm exit procedure
  874. *
  875. * @dev: the device structure
  876. *
  877. * Return: 0 on success an error code otherwise
  878. */
  879. int mei_me_pg_exit_sync(struct mei_device *dev)
  880. {
  881. struct mei_me_hw *hw = to_me_hw(dev);
  882. if (hw->d0i3_supported)
  883. return mei_me_d0i3_exit_sync(dev);
  884. else
  885. return mei_me_pg_legacy_exit_sync(dev);
  886. }
/**
 * mei_me_hw_reset - resets fw via mei csr register.
 *
 * @dev: the device structure
 * @intr_enable: if interrupt should be enabled after reset.
 *
 * Return: 0 on success an error code otherwise
 */
static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	int ret;
	u32 hcsr;

	if (intr_enable) {
		mei_me_intr_enable(dev);
		/* leave D0i3 before manipulating H_CSR; exit needs interrupts on */
		if (hw->d0i3_supported) {
			ret = mei_me_d0i3_exit_sync(dev);
			if (ret)
				return ret;
		}
	}

	pm_runtime_set_active(dev->dev);

	hcsr = mei_hcsr_read(dev);
	/* H_RST may be found lit before reset is started,
	 * for example if preceding reset flow hasn't completed.
	 * In that case asserting H_RST will be ignored, therefore
	 * we need to clean H_RST bit to start a successful reset sequence.
	 */
	if ((hcsr & H_RST) == H_RST) {
		dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
		hcsr &= ~H_RST;
		mei_hcsr_set(dev, hcsr);
		hcsr = mei_hcsr_read(dev);
	}

	/* assert reset, set interrupt-generate, and ack any pending sources */
	hcsr |= H_RST | H_IG | H_CSR_IS_MASK;

	if (!intr_enable)
		hcsr &= ~H_CSR_IE_MASK;

	dev->recvd_hw_ready = false;
	mei_hcsr_write(dev, hcsr);

	/*
	 * Host reads the H_CSR once to ensure that the
	 * posted write to H_CSR completes.
	 */
	hcsr = mei_hcsr_read(dev);

	if ((hcsr & H_RST) == 0)
		dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);

	if ((hcsr & H_RDY) == H_RDY)
		dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);

	if (!intr_enable) {
		/* no interrupt thread will run: release the reset synchronously */
		mei_me_hw_reset_release(dev);
		if (hw->d0i3_supported) {
			ret = mei_me_d0i3_enter(dev);
			if (ret)
				return ret;
		}
	}
	return 0;
}
  945. /**
  946. * mei_me_irq_quick_handler - The ISR of the MEI device
  947. *
  948. * @irq: The irq number
  949. * @dev_id: pointer to the device structure
  950. *
  951. * Return: irqreturn_t
  952. */
  953. irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
  954. {
  955. struct mei_device *dev = (struct mei_device *)dev_id;
  956. struct mei_me_hw *hw = to_me_hw(dev);
  957. u32 hcsr;
  958. hcsr = mei_hcsr_read(dev);
  959. if (!(hcsr & H_CSR_IS_MASK))
  960. return IRQ_NONE;
  961. hw->intr_source = hcsr & H_CSR_IS_MASK;
  962. dev_dbg(dev->dev, "interrupt source 0x%08X.\n", hw->intr_source);
  963. /* clear H_IS and H_D0I3C_IS bits in H_CSR to clear the interrupts */
  964. mei_hcsr_write(dev, hcsr);
  965. return IRQ_WAKE_THREAD;
  966. }
/**
 * mei_me_irq_thread_handler - function called after ISR to handle the interrupt
 * processing.
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * Return: irqreturn_t
 *
 */
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct mei_cl_cb complete_list;
	s32 slots;
	int rets = 0;

	dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n");
	/* initialize our complete list */
	mutex_lock(&dev->device_lock);
	mei_io_list_init(&complete_list);

	/* check if ME wants a reset */
	if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
		dev_warn(dev->dev, "FW not ready: resetting.\n");
		schedule_work(&dev->reset_work);
		goto end;
	}

	/* handle power-gating state transitions signaled by this interrupt */
	mei_me_pg_intr(dev);

	/* check if we need to start the dev */
	if (!mei_host_is_ready(dev)) {
		if (mei_hw_is_ready(dev)) {
			dev_dbg(dev->dev, "we need to start the dev.\n");
			dev->recvd_hw_ready = true;
			wake_up(&dev->wait_hw_ready);
		} else {
			dev_dbg(dev->dev, "Spurious Interrupt\n");
		}
		goto end;
	}

	/* check slots available for reading */
	slots = mei_count_full_read_slots(dev);
	while (slots > 0) {
		dev_dbg(dev->dev, "slots to read = %08x\n", slots);
		rets = mei_irq_read_handler(dev, &complete_list, &slots);
		/* There is a race between ME write and interrupt delivery:
		 * Not all data is always available immediately after the
		 * interrupt, so try to read again on the next interrupt.
		 */
		if (rets == -ENODATA)
			break;

		/* any other read error (unless already resetting) forces a reset */
		if (rets && dev->dev_state != MEI_DEV_RESETTING) {
			dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n",
						rets);
			schedule_work(&dev->reset_work);
			goto end;
		}
	}

	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);

	/*
	 * During PG handshake only allowed write is the replay to the
	 * PG exit message, so block calling write function
	 * if the pg event is in PG handshake
	 */
	if (dev->pg_event != MEI_PG_EVENT_WAIT &&
	    dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		rets = mei_irq_write_handler(dev, &complete_list);
		/* writes may have freed host buffer slots: refresh the flag */
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
	}

	/* complete callbacks accumulated by the read/write handlers */
	mei_irq_compl_handler(dev, &complete_list);

end:
	dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return IRQ_HANDLED;
}
/* ME-hardware implementation of the generic mei hardware operations */
static const struct mei_hw_ops mei_me_hw_ops = {

	.fw_status = mei_me_fw_status,
	.pg_state = mei_me_pg_state,

	/* readiness / lifecycle */
	.host_is_ready = mei_me_host_is_ready,

	.hw_is_ready = mei_me_hw_is_ready,
	.hw_reset = mei_me_hw_reset,
	.hw_config = mei_me_hw_config,
	.hw_start = mei_me_hw_start,

	/* power gating */
	.pg_in_transition = mei_me_pg_in_transition,
	.pg_is_enabled = mei_me_pg_is_enabled,

	/* interrupt control */
	.intr_clear = mei_me_intr_clear,
	.intr_enable = mei_me_intr_enable,
	.intr_disable = mei_me_intr_disable,

	/* circular buffer I/O */
	.hbuf_free_slots = mei_me_hbuf_empty_slots,
	.hbuf_is_ready = mei_me_hbuf_is_empty,
	.hbuf_max_len = mei_me_hbuf_max_len,

	.write = mei_me_write_message,

	.rdbuf_full_slots = mei_me_count_full_read_slots,
	.read_hdr = mei_me_mecbrw_read,
	.read = mei_me_read_slots
};
  1061. static bool mei_me_fw_type_nm(struct pci_dev *pdev)
  1062. {
  1063. u32 reg;
  1064. pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
  1065. trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg);
  1066. /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
  1067. return (reg & 0x600) == 0x200;
  1068. }
  1069. #define MEI_CFG_FW_NM \
  1070. .quirk_probe = mei_me_fw_type_nm
  1071. static bool mei_me_fw_type_sps(struct pci_dev *pdev)
  1072. {
  1073. u32 reg;
  1074. unsigned int devfn;
  1075. /*
  1076. * Read ME FW Status register to check for SPS Firmware
  1077. * The SPS FW is only signaled in pci function 0
  1078. */
  1079. devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
  1080. pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, &reg);
  1081. trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
  1082. /* if bits [19:16] = 15, running SPS Firmware */
  1083. return (reg & 0xf0000) == 0xf0000;
  1084. }
  1085. #define MEI_CFG_FW_SPS \
  1086. .quirk_probe = mei_me_fw_type_sps
/* no firmware status registers exposed */
#define MEI_CFG_LEGACY_HFS                      \
	.fw_status.count = 0

/* a single firmware status register */
#define MEI_CFG_ICH_HFS                         \
	.fw_status.count = 1,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1

/* two firmware status registers */
#define MEI_CFG_PCH_HFS                         \
	.fw_status.count = 2,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1,   \
	.fw_status.status[1] = PCI_CFG_HFS_2

/* six firmware status registers */
#define MEI_CFG_PCH8_HFS                        \
	.fw_status.count = 6,                   \
	.fw_status.status[0] = PCI_CFG_HFS_1,   \
	.fw_status.status[1] = PCI_CFG_HFS_2,   \
	.fw_status.status[2] = PCI_CFG_HFS_3,   \
	.fw_status.status[3] = PCI_CFG_HFS_4,   \
	.fw_status.status[4] = PCI_CFG_HFS_5,   \
	.fw_status.status[5] = PCI_CFG_HFS_6
/*
 * Per hardware-generation configurations, selected by the PCI ID table.
 */

/* ICH Legacy devices */
const struct mei_cfg mei_me_legacy_cfg = {
	MEI_CFG_LEGACY_HFS,
};

/* ICH devices */
const struct mei_cfg mei_me_ich_cfg = {
	MEI_CFG_ICH_HFS,
};

/* PCH devices */
const struct mei_cfg mei_me_pch_cfg = {
	MEI_CFG_PCH_HFS,
};

/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_NM,
};

/* PCH8 Lynx Point and newer devices */
const struct mei_cfg mei_me_pch8_cfg = {
	MEI_CFG_PCH8_HFS,
};

/* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
const struct mei_cfg mei_me_pch8_sps_cfg = {
	MEI_CFG_PCH8_HFS,
	MEI_CFG_FW_SPS,
};
  1130. /**
  1131. * mei_me_dev_init - allocates and initializes the mei device structure
  1132. *
  1133. * @pdev: The pci device structure
  1134. * @cfg: per device generation config
  1135. *
  1136. * Return: The mei_device_device pointer on success, NULL on failure.
  1137. */
  1138. struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
  1139. const struct mei_cfg *cfg)
  1140. {
  1141. struct mei_device *dev;
  1142. struct mei_me_hw *hw;
  1143. dev = kzalloc(sizeof(struct mei_device) +
  1144. sizeof(struct mei_me_hw), GFP_KERNEL);
  1145. if (!dev)
  1146. return NULL;
  1147. hw = to_me_hw(dev);
  1148. mei_device_init(dev, &pdev->dev, &mei_me_hw_ops);
  1149. hw->cfg = cfg;
  1150. return dev;
  1151. }