cppc_acpi.c
  1. /*
  2. * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
  3. *
  4. * (C) Copyright 2014, 2015 Linaro Ltd.
  5. * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
  6. *
  7. * This program is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU General Public License
  9. * as published by the Free Software Foundation; version 2
  10. * of the License.
  11. *
  12. * CPPC describes a few methods for controlling CPU performance using
  13. * information from a per CPU table called CPC. This table is described in
  14. * the ACPI v5.0+ specification. The table consists of a list of
  15. * registers which may be memory mapped or hardware registers and also may
  16. * include some static integer values.
  17. *
  18. * CPU performance is on an abstract continuous scale as against a discretized
  19. * P-state scale which is tied to CPU frequency only. In brief, the basic
  20. * operation involves:
  21. *
  22. * - OS makes a CPU performance request. (Can provide min and max bounds)
  23. *
  24. * - Platform (such as BMC) is free to optimize request within requested bounds
  25. * depending on power/thermal budgets etc.
  26. *
  27. * - Platform conveys its decision back to OS
  28. *
  29. * The communication between OS and platform occurs through another medium
  30. * called (PCC) Platform Communication Channel. This is a generic mailbox like
  31. * mechanism which includes doorbell semantics to indicate register updates.
  32. * See drivers/mailbox/pcc.c for details on PCC.
  33. *
  34. * Finer details about the PCC and CPPC spec are available in the ACPI v5.1 and
  35. * above specifications.
  36. */
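/*
 * Purely illustrative sketch (not part of this driver): a CPUfreq driver
 * built on top of these helpers would typically query the capabilities once
 * and then issue performance requests, roughly as follows. Error handling is
 * omitted and "cpu" is assumed to be a valid logical CPU number.
 *
 *	struct cppc_perf_caps caps;
 *	struct cppc_perf_ctrls ctrls;
 *
 *	cppc_get_perf_caps(cpu, &caps);
 *	ctrls.desired_perf = caps.nominal_perf;
 *	cppc_set_perf(cpu, &ctrls);
 */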
  37. #define pr_fmt(fmt) "ACPI CPPC: " fmt
  38. #include <linux/cpufreq.h>
  39. #include <linux/delay.h>
  40. #include <linux/ktime.h>
  41. #include <linux/rwsem.h>
  42. #include <linux/wait.h>
  43. #include <acpi/cppc_acpi.h>
  44. struct cppc_pcc_data {
  45. struct mbox_chan *pcc_channel;
  46. void __iomem *pcc_comm_addr;
  47. int pcc_subspace_idx;
  48. bool pcc_channel_acquired;
  49. ktime_t deadline;
  50. unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;
  51. bool pending_pcc_write_cmd; /* Any pending/batched PCC write cmds? */
  52. bool platform_owns_pcc; /* Ownership of PCC subspace */
  53. unsigned int pcc_write_cnt; /* Running count of PCC write commands */
  54. /*
  55. * Lock to provide controlled access to the PCC channel.
  56. *
  57. * For performance critical use cases (currently cppc_set_perf),
  58. * we need to take the read_lock and check if the channel belongs to OSPM
  59. * before reading or writing to the PCC subspace.
  60. * We need to take the write_lock before transferring the channel
  61. * ownership to the platform via a Doorbell.
  62. * This allows us to batch a number of CPPC requests if they happen
  63. * to originate at about the same time.
  64. *
  65. * For non-performance critical use cases (init),
  66. * take the write_lock for all purposes, which gives exclusive access.
  67. */
  68. struct rw_semaphore pcc_lock;
  69. /* Wait queue for CPUs whose requests were batched */
  70. wait_queue_head_t pcc_write_wait_q;
  71. };
  72. /* Structure to represent the single PCC channel */
  73. static struct cppc_pcc_data pcc_data = {
  74. .pcc_subspace_idx = -1,
  75. .platform_owns_pcc = true,
  76. };
  77. /*
  78. * The cpc_desc structure contains the ACPI register details
  79. * as described in the per CPU _CPC tables. The details
  80. * include the type of register (e.g. PCC, System IO, FFH etc.)
  81. * and destination addresses which lets us READ/WRITE CPU performance
  82. * information using the appropriate I/O methods.
  83. */
  84. static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
  85. /* pcc mapped address + header size + offset within PCC subspace */
  86. #define GET_PCC_VADDR(offs) (pcc_data.pcc_comm_addr + 0x8 + (offs))
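/*
 * The 0x8 above skips the generic PCC shared-memory region header defined by
 * the ACPI spec: a 4-byte signature, a 2-byte command and a 2-byte status.
 */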
  87. /* Check if a CPC register is in PCC */
  88. #define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
  89. (cpc)->cpc_entry.reg.space_id == \
  90. ACPI_ADR_SPACE_PLATFORM_COMM)
  92. /* Evaluates to True if reg is a NULL register descriptor */
  92. #define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
  93. (reg)->address == 0 && \
  94. (reg)->bit_width == 0 && \
  95. (reg)->bit_offset == 0 && \
  96. (reg)->access_width == 0)
  97. /* Evaluates to True if an optional cpc field is supported */
  98. #define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ? \
  99. !!(cpc)->cpc_entry.int_value : \
  100. !IS_NULL_REG(&(cpc)->cpc_entry.reg))
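/*
 * For example, the NULL register entry shown in the sample _CPC table later
 * in this file, ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
 * matches IS_NULL_REG, so CPC_SUPPORTED() evaluates to false for that
 * optional field.
 */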
  101. /*
  102. * Arbitrary Retries in case the remote processor is slow to respond
  103. * to PCC commands. Keeping it high enough to cover emulators where
  104. * the processors run painfully slow.
  105. */
  106. #define NUM_RETRIES 500
  107. #define define_one_cppc_ro(_name) \
  108. static struct kobj_attribute _name = \
  109. __ATTR(_name, 0444, show_##_name, NULL)
  110. #define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)
  111. #define show_cppc_data(access_fn, struct_name, member_name) \
  112. static ssize_t show_##member_name(struct kobject *kobj, \
  113. struct kobj_attribute *attr, char *buf) \
  114. { \
  115. struct cpc_desc *cpc_ptr = to_cpc_desc(kobj); \
  116. struct struct_name st_name = {0}; \
  117. int ret; \
  118. \
  119. ret = access_fn(cpc_ptr->cpu_id, &st_name); \
  120. if (ret) \
  121. return ret; \
  122. \
  123. return scnprintf(buf, PAGE_SIZE, "%llu\n", \
  124. (u64)st_name.member_name); \
  125. } \
  126. define_one_cppc_ro(member_name)
  127. show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
  128. show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
  129. show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
  130. show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
  131. show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
  132. show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
  133. static ssize_t show_feedback_ctrs(struct kobject *kobj,
  134. struct kobj_attribute *attr, char *buf)
  135. {
  136. struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
  137. struct cppc_perf_fb_ctrs fb_ctrs = {0};
  138. int ret;
  139. ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
  140. if (ret)
  141. return ret;
  142. return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n",
  143. fb_ctrs.reference, fb_ctrs.delivered);
  144. }
  145. define_one_cppc_ro(feedback_ctrs);
  146. static struct attribute *cppc_attrs[] = {
  147. &feedback_ctrs.attr,
  148. &reference_perf.attr,
  149. &wraparound_time.attr,
  150. &highest_perf.attr,
  151. &lowest_perf.attr,
  152. &lowest_nonlinear_perf.attr,
  153. &nominal_perf.attr,
  154. NULL
  155. };
  156. static struct kobj_type cppc_ktype = {
  157. .sysfs_ops = &kobj_sysfs_ops,
  158. .default_attrs = cppc_attrs,
  159. };
  160. static int check_pcc_chan(bool chk_err_bit)
  161. {
  162. int ret = -EIO, status = 0;
  163. struct acpi_pcct_shared_memory __iomem *generic_comm_base = pcc_data.pcc_comm_addr;
  164. ktime_t next_deadline = ktime_add(ktime_get(), pcc_data.deadline);
  165. if (!pcc_data.platform_owns_pcc)
  166. return 0;
  167. /* Retry in case the remote processor was too slow to catch up. */
  168. while (!ktime_after(ktime_get(), next_deadline)) {
  169. /*
  170. * Per spec, prior to boot the PCC space will be initialized by the
  171. * platform, which should have set the command completion bit when
  172. * PCC can be used by OSPM
  173. */
  174. status = readw_relaxed(&generic_comm_base->status);
  175. if (status & PCC_CMD_COMPLETE_MASK) {
  176. ret = 0;
  177. if (chk_err_bit && (status & PCC_ERROR_MASK))
  178. ret = -EIO;
  179. break;
  180. }
  181. /*
  182. * Reducing the bus traffic in case this loop takes longer than
  183. * a few retries.
  184. */
  185. udelay(3);
  186. }
  187. if (likely(!ret))
  188. pcc_data.platform_owns_pcc = false;
  189. else
  190. pr_err("PCC check channel failed. Status=%x\n", status);
  191. return ret;
  192. }
  193. /*
  194. * This function transfers the ownership of the PCC to the platform,
  195. * so it must be called while holding write_lock(pcc_lock).
  196. */
  197. static int send_pcc_cmd(u16 cmd)
  198. {
  199. int ret = -EIO, i;
  200. struct acpi_pcct_shared_memory *generic_comm_base =
  201. (struct acpi_pcct_shared_memory *) pcc_data.pcc_comm_addr;
  202. static ktime_t last_cmd_cmpl_time, last_mpar_reset;
  203. static int mpar_count;
  204. unsigned int time_delta;
  205. /*
  206. * For CMD_WRITE we know for a fact the caller should have checked
  207. * the channel before writing to PCC space
  208. */
  209. if (cmd == CMD_READ) {
  210. /*
  211. * If there are pending cpc_writes, then we stole the channel
  212. * before write completion, so first send a WRITE command to
  213. * platform
  214. */
  215. if (pcc_data.pending_pcc_write_cmd)
  216. send_pcc_cmd(CMD_WRITE);
  217. ret = check_pcc_chan(false);
  218. if (ret)
  219. goto end;
  220. } else /* CMD_WRITE */
  221. pcc_data.pending_pcc_write_cmd = FALSE;
  222. /*
  223. * Handle the Minimum Request Turnaround Time (MRTT)
  224. * "The minimum amount of time that OSPM must wait after the completion
  225. * of a command before issuing the next command, in microseconds"
  226. */
  227. if (pcc_data.pcc_mrtt) {
  228. time_delta = ktime_us_delta(ktime_get(), last_cmd_cmpl_time);
  229. if (pcc_data.pcc_mrtt > time_delta)
  230. udelay(pcc_data.pcc_mrtt - time_delta);
  231. }
  232. /*
  233. * Handle the non-zero Maximum Periodic Access Rate (MPAR)
  234. * "The maximum number of periodic requests that the subspace channel can
  235. * support, reported in commands per minute. 0 indicates no limitation."
  236. *
  237. * This parameter should ideally be zero or large enough so that it can
  238. * handle the maximum number of requests that all the cores in the system can
  239. * collectively generate. If it is not, we will follow the spec and just
  240. * not send the request to the platform after hitting the MPAR limit in
  241. * any 60s window
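 * 
 * Illustrative example with a hypothetical value: with pcc_mpar == 600, at
 * most 600 doorbell commands are issued in any 60s window; once mpar_count
 * drops to zero inside that window, further requests fail with -EIO until
 * 60s have passed since last_mpar_reset.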
  242. */
  243. if (pcc_data.pcc_mpar) {
  244. if (mpar_count == 0) {
  245. time_delta = ktime_ms_delta(ktime_get(), last_mpar_reset);
  246. if (time_delta < 60 * MSEC_PER_SEC) {
  247. pr_debug("PCC cmd not sent due to MPAR limit");
  248. ret = -EIO;
  249. goto end;
  250. }
  251. last_mpar_reset = ktime_get();
  252. mpar_count = pcc_data.pcc_mpar;
  253. }
  254. mpar_count--;
  255. }
  256. /* Write to the shared comm region. */
  257. writew_relaxed(cmd, &generic_comm_base->command);
  258. /* Flip CMD COMPLETE bit */
  259. writew_relaxed(0, &generic_comm_base->status);
  260. pcc_data.platform_owns_pcc = true;
  261. /* Ring doorbell */
  262. ret = mbox_send_message(pcc_data.pcc_channel, &cmd);
  263. if (ret < 0) {
  264. pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
  265. cmd, ret);
  266. goto end;
  267. }
  268. /* Wait for completion and check for PCC error bit */
  269. ret = check_pcc_chan(true);
  270. if (pcc_data.pcc_mrtt)
  271. last_cmd_cmpl_time = ktime_get();
  272. if (pcc_data.pcc_channel->mbox->txdone_irq)
  273. mbox_chan_txdone(pcc_data.pcc_channel, ret);
  274. else
  275. mbox_client_txdone(pcc_data.pcc_channel, ret);
  276. end:
  277. if (cmd == CMD_WRITE) {
  278. if (unlikely(ret)) {
  279. for_each_possible_cpu(i) {
  280. struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);
  281. if (!desc)
  282. continue;
  283. if (desc->write_cmd_id == pcc_data.pcc_write_cnt)
  284. desc->write_cmd_status = ret;
  285. }
  286. }
  287. pcc_data.pcc_write_cnt++;
  288. wake_up_all(&pcc_data.pcc_write_wait_q);
  289. }
  290. return ret;
  291. }
  292. static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
  293. {
  294. if (ret < 0)
  295. pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
  296. *(u16 *)msg, ret);
  297. else
  298. pr_debug("TX completed. CMD sent:%x, ret:%d\n",
  299. *(u16 *)msg, ret);
  300. }
  301. struct mbox_client cppc_mbox_cl = {
  302. .tx_done = cppc_chan_tx_done,
  303. .knows_txdone = true,
  304. };
  305. static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
  306. {
  307. int result = -EFAULT;
  308. acpi_status status = AE_OK;
  309. struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
  310. struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
  311. struct acpi_buffer state = {0, NULL};
  312. union acpi_object *psd = NULL;
  313. struct acpi_psd_package *pdomain;
  314. status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
  315. &buffer, ACPI_TYPE_PACKAGE);
  316. if (status == AE_NOT_FOUND) /* _PSD is optional */
  317. return 0;
  318. if (ACPI_FAILURE(status))
  319. return -ENODEV;
  320. psd = buffer.pointer;
  321. if (!psd || psd->package.count != 1) {
  322. pr_debug("Invalid _PSD data\n");
  323. goto end;
  324. }
  325. pdomain = &(cpc_ptr->domain_info);
  326. state.length = sizeof(struct acpi_psd_package);
  327. state.pointer = pdomain;
  328. status = acpi_extract_package(&(psd->package.elements[0]),
  329. &format, &state);
  330. if (ACPI_FAILURE(status)) {
  331. pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
  332. goto end;
  333. }
  334. if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
  335. pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
  336. goto end;
  337. }
  338. if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
  339. pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
  340. goto end;
  341. }
  342. if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
  343. pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
  344. pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
  345. pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
  346. goto end;
  347. }
  348. result = 0;
  349. end:
  350. kfree(buffer.pointer);
  351. return result;
  352. }
  353. /**
  354. * acpi_get_psd_map - Map the CPUs in a common freq domain.
  355. * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info.
  356. *
  357. * Return: 0 for success or negative value for err.
  358. */
  359. int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
  360. {
  361. int count_target;
  362. int retval = 0;
  363. unsigned int i, j;
  364. cpumask_var_t covered_cpus;
  365. struct cppc_cpudata *pr, *match_pr;
  366. struct acpi_psd_package *pdomain;
  367. struct acpi_psd_package *match_pdomain;
  368. struct cpc_desc *cpc_ptr, *match_cpc_ptr;
  369. if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
  370. return -ENOMEM;
  371. /*
  372. * Now that we have _PSD data from all CPUs, lets setup P-state
  373. * domain info.
  374. */
  375. for_each_possible_cpu(i) {
  376. pr = all_cpu_data[i];
  377. if (!pr)
  378. continue;
  379. if (cpumask_test_cpu(i, covered_cpus))
  380. continue;
  381. cpc_ptr = per_cpu(cpc_desc_ptr, i);
  382. if (!cpc_ptr) {
  383. retval = -EFAULT;
  384. goto err_ret;
  385. }
  386. pdomain = &(cpc_ptr->domain_info);
  387. cpumask_set_cpu(i, pr->shared_cpu_map);
  388. cpumask_set_cpu(i, covered_cpus);
  389. if (pdomain->num_processors <= 1)
  390. continue;
  391. /* Validate the Domain info */
  392. count_target = pdomain->num_processors;
  393. if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
  394. pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
  395. else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
  396. pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
  397. else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
  398. pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;
  399. for_each_possible_cpu(j) {
  400. if (i == j)
  401. continue;
  402. match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
  403. if (!match_cpc_ptr) {
  404. retval = -EFAULT;
  405. goto err_ret;
  406. }
  407. match_pdomain = &(match_cpc_ptr->domain_info);
  408. if (match_pdomain->domain != pdomain->domain)
  409. continue;
  410. /* Here i and j are in the same domain */
  411. if (match_pdomain->num_processors != count_target) {
  412. retval = -EFAULT;
  413. goto err_ret;
  414. }
  415. if (pdomain->coord_type != match_pdomain->coord_type) {
  416. retval = -EFAULT;
  417. goto err_ret;
  418. }
  419. cpumask_set_cpu(j, covered_cpus);
  420. cpumask_set_cpu(j, pr->shared_cpu_map);
  421. }
  422. for_each_possible_cpu(j) {
  423. if (i == j)
  424. continue;
  425. match_pr = all_cpu_data[j];
  426. if (!match_pr)
  427. continue;
  428. match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
  429. if (!match_cpc_ptr) {
  430. retval = -EFAULT;
  431. goto err_ret;
  432. }
  433. match_pdomain = &(match_cpc_ptr->domain_info);
  434. if (match_pdomain->domain != pdomain->domain)
  435. continue;
  436. match_pr->shared_type = pr->shared_type;
  437. cpumask_copy(match_pr->shared_cpu_map,
  438. pr->shared_cpu_map);
  439. }
  440. }
  441. err_ret:
  442. for_each_possible_cpu(i) {
  443. pr = all_cpu_data[i];
  444. if (!pr)
  445. continue;
  446. /* Assume no coordination on any error parsing domain info */
  447. if (retval) {
  448. cpumask_clear(pr->shared_cpu_map);
  449. cpumask_set_cpu(i, pr->shared_cpu_map);
  450. pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
  451. }
  452. }
  453. free_cpumask_var(covered_cpus);
  454. return retval;
  455. }
  456. EXPORT_SYMBOL_GPL(acpi_get_psd_map);
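/*
 * For example (hypothetical topology): if CPUs 0-3 report the same _PSD
 * domain with coordination type SW_ANY, acpi_get_psd_map() leaves each of
 * their shared_cpu_map masks containing CPUs 0-3 and sets shared_type to
 * CPUFREQ_SHARED_TYPE_ANY on all four cppc_cpudata entries.
 */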
  457. static int register_pcc_channel(int pcc_subspace_idx)
  458. {
  459. struct acpi_pcct_hw_reduced *cppc_ss;
  460. u64 usecs_lat;
  461. if (pcc_subspace_idx >= 0) {
  462. pcc_data.pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl,
  463. pcc_subspace_idx);
  464. if (IS_ERR(pcc_data.pcc_channel)) {
  465. pr_err("Failed to find PCC communication channel\n");
  466. return -ENODEV;
  467. }
  468. /*
  469. * The PCC mailbox controller driver should
  470. * have parsed the PCCT (global table of all
  471. * PCC channels) and stored pointers to the
  472. * subspace communication region in con_priv.
  473. */
  474. cppc_ss = (pcc_data.pcc_channel)->con_priv;
  475. if (!cppc_ss) {
  476. pr_err("No PCC subspace found for CPPC\n");
  477. return -ENODEV;
  478. }
  479. /*
  480. * cppc_ss->latency is just a Nominal value. In reality
  481. * the remote processor could be much slower to reply.
  482. * So add an arbitrary amount of wait on top of Nominal.
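 * For example, with a (hypothetical) nominal latency of 40us the polling
 * deadline works out to NUM_RETRIES * 40us = 20ms.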
  483. */
  484. usecs_lat = NUM_RETRIES * cppc_ss->latency;
  485. pcc_data.deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC);
  486. pcc_data.pcc_mrtt = cppc_ss->min_turnaround_time;
  487. pcc_data.pcc_mpar = cppc_ss->max_access_rate;
  488. pcc_data.pcc_nominal = cppc_ss->latency;
  489. pcc_data.pcc_comm_addr = acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
  490. if (!pcc_data.pcc_comm_addr) {
  491. pr_err("Failed to ioremap PCC comm region mem\n");
  492. return -ENOMEM;
  493. }
  494. /* Set flag so that we don't come here for each CPU. */
  495. pcc_data.pcc_channel_acquired = true;
  496. }
  497. return 0;
  498. }
  499. /**
  500. * cpc_ffh_supported() - check if FFH reading supported
  501. *
  502. * Check if the architecture has support for functional fixed hardware
  503. * read/write capability.
  504. *
  505. * Return: true for supported, false for not supported
  506. */
  507. bool __weak cpc_ffh_supported(void)
  508. {
  509. return false;
  510. }
  511. /*
  512. * An example CPC table looks like the following.
  513. *
  514. * Name(_CPC, Package()
  515. * {
  516. * 17,
  517. * // NumEntries
  518. * 1,
  519. * // Revision
  520. * ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},
  521. * // Highest Performance
  522. * ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},
  523. * // Nominal Performance
  524. * ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},
  525. * // Lowest Nonlinear Performance
  526. * ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},
  527. * // Lowest Performance
  528. * ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},
  529. * // Guaranteed Performance Register
  530. * ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},
  531. * // Desired Performance Register
  532. * ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
  533. * ..
  534. * ..
  535. * ..
  536. *
  537. * }
  538. * Each Register() encodes how to access that specific register.
  539. * e.g. a sample PCC entry has the following encoding:
  540. *
  541. * Register (
  542. * PCC,
  543. * // AddressSpaceKeyword
  544. * 8,
  545. * //RegisterBitWidth
  546. * 8,
  547. * //RegisterBitOffset
  548. * 0x30,
  549. * //RegisterAddress
  550. * 9
  551. * //AccessSize (subspace ID)
  552. * 0
  553. * )
  554. * }
  555. */
  556. /**
  557. * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
  558. * @pr: Ptr to acpi_processor containing this CPU's logical ID.
  559. *
  560. * Return: 0 for success or negative value for err.
  561. */
  562. int acpi_cppc_processor_probe(struct acpi_processor *pr)
  563. {
  564. struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
  565. union acpi_object *out_obj, *cpc_obj;
  566. struct cpc_desc *cpc_ptr;
  567. struct cpc_reg *gas_t;
  568. struct device *cpu_dev;
  569. acpi_handle handle = pr->handle;
  570. unsigned int num_ent, i, cpc_rev;
  571. acpi_status status;
  572. int ret = -EFAULT;
  573. /* Parse the ACPI _CPC table for this cpu. */
  574. status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
  575. ACPI_TYPE_PACKAGE);
  576. if (ACPI_FAILURE(status)) {
  577. ret = -ENODEV;
  578. goto out_buf_free;
  579. }
  580. out_obj = (union acpi_object *) output.pointer;
  581. cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
  582. if (!cpc_ptr) {
  583. ret = -ENOMEM;
  584. goto out_buf_free;
  585. }
  586. /* First entry is NumEntries. */
  587. cpc_obj = &out_obj->package.elements[0];
  588. if (cpc_obj->type == ACPI_TYPE_INTEGER) {
  589. num_ent = cpc_obj->integer.value;
  590. } else {
  591. pr_debug("Unexpected entry type(%d) for NumEntries\n",
  592. cpc_obj->type);
  593. goto out_free;
  594. }
  595. /* Only support CPPCv2. Bail otherwise. */
  596. if (num_ent != CPPC_NUM_ENT) {
  597. pr_debug("Firmware exports %d entries. Expected: %d\n",
  598. num_ent, CPPC_NUM_ENT);
  599. goto out_free;
  600. }
  601. cpc_ptr->num_entries = num_ent;
  602. /* Second entry should be revision. */
  603. cpc_obj = &out_obj->package.elements[1];
  604. if (cpc_obj->type == ACPI_TYPE_INTEGER) {
  605. cpc_rev = cpc_obj->integer.value;
  606. } else {
  607. pr_debug("Unexpected entry type(%d) for Revision\n",
  608. cpc_obj->type);
  609. goto out_free;
  610. }
  611. if (cpc_rev != CPPC_REV) {
  612. pr_debug("Firmware exports revision:%d. Expected:%d\n",
  613. cpc_rev, CPPC_REV);
  614. goto out_free;
  615. }
  616. /* Iterate through remaining entries in _CPC */
  617. for (i = 2; i < num_ent; i++) {
  618. cpc_obj = &out_obj->package.elements[i];
  619. if (cpc_obj->type == ACPI_TYPE_INTEGER) {
  620. cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
  621. cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
  622. } else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
  623. gas_t = (struct cpc_reg *)
  624. cpc_obj->buffer.pointer;
  625. /*
  626. * The PCC Subspace index is encoded inside
  627. * the CPC table entries. The same PCC index
  628. * will be used for all the PCC entries,
  629. * so extract it only once.
  630. */
  631. if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
  632. if (pcc_data.pcc_subspace_idx < 0)
  633. pcc_data.pcc_subspace_idx = gas_t->access_width;
  634. else if (pcc_data.pcc_subspace_idx != gas_t->access_width) {
  635. pr_debug("Mismatched PCC ids.\n");
  636. goto out_free;
  637. }
  638. } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
  639. if (gas_t->address) {
  640. void __iomem *addr;
  641. addr = ioremap(gas_t->address, gas_t->bit_width/8);
  642. if (!addr)
  643. goto out_free;
  644. cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
  645. }
  646. } else {
  647. if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
  648. /* Support only PCC, SYS MEM and FFH type regs */
  649. pr_debug("Unsupported register type: %d\n", gas_t->space_id);
  650. goto out_free;
  651. }
  652. }
  653. cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
  654. memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
  655. } else {
  656. pr_debug("Err in entry:%d in CPC table of CPU:%d \n", i, pr->id);
  657. goto out_free;
  658. }
  659. }
  660. /* Store CPU Logical ID */
  661. cpc_ptr->cpu_id = pr->id;
  662. /* Parse PSD data for this CPU */
  663. ret = acpi_get_psd(cpc_ptr, handle);
  664. if (ret)
  665. goto out_free;
  666. /* Register PCC channel once for all CPUs. */
  667. if (!pcc_data.pcc_channel_acquired) {
  668. ret = register_pcc_channel(pcc_data.pcc_subspace_idx);
  669. if (ret)
  670. goto out_free;
  671. init_rwsem(&pcc_data.pcc_lock);
  672. init_waitqueue_head(&pcc_data.pcc_write_wait_q);
  673. }
  674. /* Everything looks okay */
  675. pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);
  676. /* Add per logical CPU nodes for reading its feedback counters. */
  677. cpu_dev = get_cpu_device(pr->id);
  678. if (!cpu_dev) {
  679. ret = -EINVAL;
  680. goto out_free;
  681. }
  682. /* Plug PSD data into this CPU's CPC descriptor. */
  683. per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
  684. ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
  685. "acpi_cppc");
  686. if (ret) {
  687. per_cpu(cpc_desc_ptr, pr->id) = NULL;
  688. kobject_put(&cpc_ptr->kobj);
  689. goto out_free;
  690. }
  691. kfree(output.pointer);
  692. return 0;
  693. out_free:
  694. /* Free all the mapped sys mem areas for this CPU */
  695. for (i = 2; i < cpc_ptr->num_entries; i++) {
  696. void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
  697. if (addr)
  698. iounmap(addr);
  699. }
  700. kfree(cpc_ptr);
  701. out_buf_free:
  702. kfree(output.pointer);
  703. return ret;
  704. }
  705. EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);
  706. /**
  707. * acpi_cppc_processor_exit - Cleanup CPC structs.
  708. * @pr: Ptr to acpi_processor containing this CPU's logical ID.
  709. *
  710. * Return: Void
  711. */
  712. void acpi_cppc_processor_exit(struct acpi_processor *pr)
  713. {
  714. struct cpc_desc *cpc_ptr;
  715. unsigned int i;
  716. void __iomem *addr;
  717. cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
  718. if (!cpc_ptr)
  719. return;
  720. /* Free all the mapped sys mem areas for this CPU */
  721. for (i = 2; i < cpc_ptr->num_entries; i++) {
  722. addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
  723. if (addr)
  724. iounmap(addr);
  725. }
  726. kobject_put(&cpc_ptr->kobj);
  727. kfree(cpc_ptr);
  728. }
  729. EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
  730. /**
  731. * cpc_read_ffh() - Read FFH register
  732. * @cpunum: cpu number to read
  733. * @reg: cppc register information
  734. * @val: place holder for return value
  735. *
  736. * Read bit_width bits from a specified address and bit_offset
  737. *
  738. * Return: 0 for success, or a negative error code otherwise
  739. */
  740. int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
  741. {
  742. return -ENOTSUPP;
  743. }
  744. /**
  745. * cpc_write_ffh() - Write FFH register
  746. * @cpunum: cpu number to write
  747. * @reg: cppc register information
  748. * @val: value to write
  749. *
  750. * Write value of bit_width bits to a specified address and bit_offset
  751. *
  752. * Return: 0 for success, or a negative error code otherwise
  753. */
  754. int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
  755. {
  756. return -ENOTSUPP;
  757. }
  758. /*
  759. * Since cpc_read and cpc_write are called while holding pcc_lock, they should be
  760. * as fast as possible. We have already mapped the PCC subspace during init, so
  761. * we can directly write to it.
  762. */
  763. static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
  764. {
  765. int ret_val = 0;
  766. void __iomem *vaddr = 0;
  767. struct cpc_reg *reg = &reg_res->cpc_entry.reg;
  768. if (reg_res->type == ACPI_TYPE_INTEGER) {
  769. *val = reg_res->cpc_entry.int_value;
  770. return ret_val;
  771. }
  772. *val = 0;
  773. if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
  774. vaddr = GET_PCC_VADDR(reg->address);
  775. else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
  776. vaddr = reg_res->sys_mem_vaddr;
  777. else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
  778. return cpc_read_ffh(cpu, reg, val);
  779. else
  780. return acpi_os_read_memory((acpi_physical_address)reg->address,
  781. val, reg->bit_width);
  782. switch (reg->bit_width) {
  783. case 8:
  784. *val = readb_relaxed(vaddr);
  785. break;
  786. case 16:
  787. *val = readw_relaxed(vaddr);
  788. break;
  789. case 32:
  790. *val = readl_relaxed(vaddr);
  791. break;
  792. case 64:
  793. *val = readq_relaxed(vaddr);
  794. break;
  795. default:
  796. pr_debug("Error: Cannot read %u bit width from PCC\n",
  797. reg->bit_width);
  798. ret_val = -EFAULT;
  799. }
  800. return ret_val;
  801. }
  802. static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
  803. {
  804. int ret_val = 0;
  805. void __iomem *vaddr = 0;
  806. struct cpc_reg *reg = &reg_res->cpc_entry.reg;
  807. if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
  808. vaddr = GET_PCC_VADDR(reg->address);
  809. else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
  810. vaddr = reg_res->sys_mem_vaddr;
  811. else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
  812. return cpc_write_ffh(cpu, reg, val);
  813. else
  814. return acpi_os_write_memory((acpi_physical_address)reg->address,
  815. val, reg->bit_width);
  816. switch (reg->bit_width) {
  817. case 8:
  818. writeb_relaxed(val, vaddr);
  819. break;
  820. case 16:
  821. writew_relaxed(val, vaddr);
  822. break;
  823. case 32:
  824. writel_relaxed(val, vaddr);
  825. break;
  826. case 64:
  827. writeq_relaxed(val, vaddr);
  828. break;
  829. default:
  830. pr_debug("Error: Cannot write %u bit width to PCC\n",
  831. reg->bit_width);
  832. ret_val = -EFAULT;
  833. break;
  834. }
  835. return ret_val;
  836. }
  837. /**
  838. * cppc_get_perf_caps - Get a CPUs performance capabilities.
  839. * @cpunum: CPU from which to get capabilities info.
  840. * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
  841. *
  842. * Return: 0 for success with perf_caps populated else -ERRNO.
  843. */
  844. int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
  845. {
  846. struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
  847. struct cpc_register_resource *highest_reg, *lowest_reg,
  848. *lowest_non_linear_reg, *nominal_reg;
  849. u64 high, low, nom, min_nonlinear;
  850. int ret = 0, regs_in_pcc = 0;
  851. if (!cpc_desc) {
  852. pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
  853. return -ENODEV;
  854. }
  855. highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
  856. lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
  857. lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
  858. nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
  859. /* Are any of the regs PCC? */
  860. if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
  861. CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg)) {
  862. regs_in_pcc = 1;
  863. down_write(&pcc_data.pcc_lock);
  864. /* Ring doorbell once to update PCC subspace */
  865. if (send_pcc_cmd(CMD_READ) < 0) {
  866. ret = -EIO;
  867. goto out_err;
  868. }
  869. }
  870. cpc_read(cpunum, highest_reg, &high);
  871. perf_caps->highest_perf = high;
  872. cpc_read(cpunum, lowest_reg, &low);
  873. perf_caps->lowest_perf = low;
  874. cpc_read(cpunum, nominal_reg, &nom);
  875. perf_caps->nominal_perf = nom;
  876. cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
  877. perf_caps->lowest_nonlinear_perf = min_nonlinear;
  878. if (!high || !low || !nom || !min_nonlinear)
  879. ret = -EFAULT;
  880. out_err:
  881. if (regs_in_pcc)
  882. up_write(&pcc_data.pcc_lock);
  883. return ret;
  884. }
  885. EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
  886. /**
  887. * cppc_get_perf_ctrs - Read a CPUs performance feedback counters.
  888. * @cpunum: CPU from which to read counters.
  889. * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
  890. *
  891. * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
  892. */
  893. int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
  894. {
  895. struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
  896. struct cpc_register_resource *delivered_reg, *reference_reg,
  897. *ref_perf_reg, *ctr_wrap_reg;
  898. u64 delivered, reference, ref_perf, ctr_wrap_time;
  899. int ret = 0, regs_in_pcc = 0;
  900. if (!cpc_desc) {
  901. pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
  902. return -ENODEV;
  903. }
  904. delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
  905. reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
  906. ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
  907. ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];
  908. /*
  909. * If the reference perf register is not supported then we should
  910. * use the nominal perf value
  911. */
  912. if (!CPC_SUPPORTED(ref_perf_reg))
  913. ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
  914. /* Are any of the regs PCC? */
  915. if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
  916. CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
  917. down_write(&pcc_data.pcc_lock);
  918. regs_in_pcc = 1;
  919. /* Ring doorbell once to update PCC subspace */
  920. if (send_pcc_cmd(CMD_READ) < 0) {
  921. ret = -EIO;
  922. goto out_err;
  923. }
  924. }
  925. cpc_read(cpunum, delivered_reg, &delivered);
  926. cpc_read(cpunum, reference_reg, &reference);
  927. cpc_read(cpunum, ref_perf_reg, &ref_perf);
  928. /*
  929. * Per spec, if the optional ctr_wrap_time register is unsupported, then the
  930. * performance counters are assumed to never wrap during the lifetime of
  931. * the platform.
  932. */
  933. ctr_wrap_time = (u64)(~((u64)0));
  934. if (CPC_SUPPORTED(ctr_wrap_reg))
  935. cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);
  936. if (!delivered || !reference || !ref_perf) {
  937. ret = -EFAULT;
  938. goto out_err;
  939. }
  940. perf_fb_ctrs->delivered = delivered;
  941. perf_fb_ctrs->reference = reference;
  942. perf_fb_ctrs->reference_perf = ref_perf;
  943. perf_fb_ctrs->wraparound_time = ctr_wrap_time;
  944. out_err:
  945. if (regs_in_pcc)
  946. up_write(&pcc_data.pcc_lock);
  947. return ret;
  948. }
  949. EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
  950. /**
  951. * cppc_set_perf - Set a CPUs performance controls.
  952. * @cpu: CPU for which to set performance controls.
  953. * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
  954. *
  955. * Return: 0 for success, -ERRNO otherwise.
  956. */
  957. int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
  958. {
  959. struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
  960. struct cpc_register_resource *desired_reg;
  961. int ret = 0;
  962. if (!cpc_desc) {
  963. pr_debug("No CPC descriptor for CPU:%d\n", cpu);
  964. return -ENODEV;
  965. }
  966. desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
  967. /*
  968. * This is Phase-I where we want to write to CPC registers
  969. * -> We want all CPUs to be able to execute this phase in parallel
  970. *
  971. * Since read_lock can be acquired by multiple CPUs simultaneously we
  972. * achieve that goal here
  973. */
  974. if (CPC_IN_PCC(desired_reg)) {
  975. down_read(&pcc_data.pcc_lock); /* BEGIN Phase-I */
  976. if (pcc_data.platform_owns_pcc) {
  977. ret = check_pcc_chan(false);
  978. if (ret) {
  979. up_read(&pcc_data.pcc_lock);
  980. return ret;
  981. }
  982. }
  983. /*
  984. * Update the pending_write to make sure a PCC CMD_READ will not
  985. * arrive and steal the channel during the switch to write lock
  986. */
  987. pcc_data.pending_pcc_write_cmd = true;
  988. cpc_desc->write_cmd_id = pcc_data.pcc_write_cnt;
  989. cpc_desc->write_cmd_status = 0;
  990. }
  991. /*
  992. * Skip writing MIN/MAX until Linux knows how to come up with
  993. * useful values.
  994. */
  995. cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);
  996. if (CPC_IN_PCC(desired_reg))
  997. up_read(&pcc_data.pcc_lock); /* END Phase-I */
  998. /*
  999. * This is Phase-II where we transfer the ownership of PCC to Platform
  1000. *
  1001. * Short summary: if we think of a group of cppc_set_perf
  1002. * requests that happened in a short overlapping interval, the last CPU to
  1003. * come out of Phase-I will enter Phase-II and ring the doorbell.
  1004. *
  1005. * We have the following requirements for Phase-II:
  1006. * 1. We want to execute Phase-II only when there are no CPUs
  1007. * currently executing in Phase-I
  1008. * 2. Once we start Phase-II we want to avoid all other CPUs from
  1009. * entering Phase-I.
  1010. * 3. We want only one CPU among all those who went through Phase-I
  1011. * to run phase-II
  1012. *
  1013. * If write_trylock fails to get the lock and doesn't transfer the
  1014. * PCC ownership to the platform, then one of the following will be TRUE
  1015. * 1. There is at-least one CPU in Phase-I which will later execute
  1016. * write_trylock, so the CPUs in Phase-I will be responsible for
  1017. * executing the Phase-II.
  1018. * 2. Some other CPU has beaten this CPU to successfully execute the
  1019. * write_trylock and has already acquired the write_lock. We know for a
  1020. * fact it(other CPU acquiring the write_lock) couldn't have happened
  1021. * before this CPU's Phase-I as we held the read_lock.
  1022. * 3. Some other CPU executing a pcc CMD_READ has stolen the
  1023. * down_write, in which case send_pcc_cmd will check for pending
  1024. * CMD_WRITE commands by checking the pending_pcc_write_cmd flag
  1025. * and deliver the pending write before servicing the read.
  1026. * So in all cases, this CPU knows that its request will be delivered
  1027. * by another CPU and can return.
  1028. *
  1029. * After getting the down_write we still need to check for
  1030. * pending_pcc_write_cmd to take care of the following scenario
  1031. * The thread running this code could be scheduled out between
  1032. * Phase-I and Phase-II. Before it is scheduled back on, another CPU
  1033. * could have delivered the request to Platform by triggering the
  1034. * doorbell and transferred the ownership of PCC to platform. So this
  1035. * avoids triggering an unnecessary doorbell and more importantly before
  1036. * triggering the doorbell it makes sure that the PCC channel ownership
  1037. * is still with OSPM.
  1038. * pending_pcc_write_cmd can also be cleared by a different CPU, if
  1039. * there was a pcc CMD_READ waiting on down_write and it steals the lock
  1040. * before the pcc CMD_WRITE is completed. send_pcc_cmd checks for this
  1041. * case during a CMD_READ and if there are pending writes it delivers
  1042. * the write command before servicing the read command
  1043. */
  1044. if (CPC_IN_PCC(desired_reg)) {
  1045. if (down_write_trylock(&pcc_data.pcc_lock)) { /* BEGIN Phase-II */
  1046. /* Update only if there are pending write commands */
  1047. if (pcc_data.pending_pcc_write_cmd)
  1048. send_pcc_cmd(CMD_WRITE);
  1049. up_write(&pcc_data.pcc_lock); /* END Phase-II */
  1050. } else
  1051. /* Wait until pcc_write_cnt is updated by send_pcc_cmd */
  1052. wait_event(pcc_data.pcc_write_wait_q,
  1053. cpc_desc->write_cmd_id != pcc_data.pcc_write_cnt);
  1054. /* send_pcc_cmd updates the status in case of failure */
  1055. ret = cpc_desc->write_cmd_status;
  1056. }
  1057. return ret;
  1058. }
  1059. EXPORT_SYMBOL_GPL(cppc_set_perf);
  1060. /**
  1061. * cppc_get_transition_latency - returns frequency transition latency in ns
  1062. *
  1063. * ACPI CPPC does not explicitly specify how a platform can convey the
  1064. * transition latency for performance change requests. The closest we have
  1065. * is the timing information from the PCCT tables which provides the info
  1066. * on the number and frequency of PCC commands the platform can handle.
  1067. */
  1068. unsigned int cppc_get_transition_latency(int cpu_num)
  1069. {
  1070. /*
  1071. * Expected transition latency is based on the PCCT timing values
  1072. * Below are definitions from the ACPI spec:
  1073. * pcc_nominal - Expected latency to process a command, in microseconds
  1074. * pcc_mpar - The maximum number of periodic requests that the subspace
  1075. * channel can support, reported in commands per minute. 0
  1076. * indicates no limitation.
  1077. * pcc_mrtt - The minimum amount of time that OSPM must wait after the
  1078. * completion of a command before issuing the next command,
  1079. * in microseconds.
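 * 
 * Worked example with hypothetical values: pcc_mpar = 1000 commands/min
 * yields an MPAR-based floor of 60 * (10^9 / 1000) = 60,000,000 ns (60 ms),
 * which is then raised to at least pcc_nominal * 1000 and pcc_mrtt * 1000,
 * since both of those are reported in microseconds.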
  1080. */
  1081. unsigned int latency_ns = 0;
  1082. struct cpc_desc *cpc_desc;
  1083. struct cpc_register_resource *desired_reg;
  1084. cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
  1085. if (!cpc_desc)
  1086. return CPUFREQ_ETERNAL;
  1087. desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
  1088. if (!CPC_IN_PCC(desired_reg))
  1089. return CPUFREQ_ETERNAL;
  1090. if (pcc_data.pcc_mpar)
  1091. latency_ns = 60 * (1000 * 1000 * 1000 / pcc_data.pcc_mpar);
  1092. latency_ns = max(latency_ns, pcc_data.pcc_nominal * 1000);
  1093. latency_ns = max(latency_ns, pcc_data.pcc_mrtt * 1000);
  1094. return latency_ns;
  1095. }
  1096. EXPORT_SYMBOL_GPL(cppc_get_transition_latency);