interface.c

/*
 * Greybus interface code
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/delay.h>

#include "greybus.h"
#include "greybus_trace.h"

#define GB_INTERFACE_MODE_SWITCH_TIMEOUT 2000

#define GB_INTERFACE_DEVICE_ID_BAD 0xff

#define GB_INTERFACE_AUTOSUSPEND_MS 3000

/* Time required for interface to enter standby before disabling REFCLK */
#define GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS 20

/* Don't-care selector index */
#define DME_SELECTOR_INDEX_NULL 0

/* DME attributes */
/* FIXME: remove ES2 support and DME_T_TST_SRC_INCREMENT */
#define DME_T_TST_SRC_INCREMENT 0x4083

#define DME_DDBL1_MANUFACTURERID 0x5003
#define DME_DDBL1_PRODUCTID 0x5004

#define DME_TOSHIBA_GMP_VID 0x6000
#define DME_TOSHIBA_GMP_PID 0x6001
#define DME_TOSHIBA_GMP_SN0 0x6002
#define DME_TOSHIBA_GMP_SN1 0x6003
#define DME_TOSHIBA_GMP_INIT_STATUS 0x6101

/* DDBL1 Manufacturer and Product ids */
#define TOSHIBA_DMID 0x0126
#define TOSHIBA_ES2_BRIDGE_DPID 0x1000
#define TOSHIBA_ES3_APBRIDGE_DPID 0x1001
#define TOSHIBA_ES3_GBPHY_DPID 0x1002

static int gb_interface_hibernate_link(struct gb_interface *intf);
static int gb_interface_refclk_set(struct gb_interface *intf, bool enable);

static int gb_interface_dme_attr_get(struct gb_interface *intf,
				     u16 attr, u32 *val)
{
	return gb_svc_dme_peer_get(intf->hd->svc, intf->interface_id,
				   attr, DME_SELECTOR_INDEX_NULL, val);
}

static int gb_interface_read_ara_dme(struct gb_interface *intf)
{
	u32 sn0, sn1;
	int ret;

	/*
	 * Unless this is a Toshiba bridge, bail out until we have defined
	 * standard GMP attributes.
	 */
	if (intf->ddbl1_manufacturer_id != TOSHIBA_DMID) {
		dev_err(&intf->dev, "unknown manufacturer %08x\n",
			intf->ddbl1_manufacturer_id);
		return -ENODEV;
	}

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_VID,
					&intf->vendor_id);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_PID,
					&intf->product_id);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN0, &sn0);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN1, &sn1);
	if (ret)
		return ret;

	intf->serial_number = (u64)sn1 << 32 | sn0;

	return 0;
}

static int gb_interface_read_dme(struct gb_interface *intf)
{
	int ret;

	/* DME attributes have already been read */
	if (intf->dme_read)
		return 0;

	ret = gb_interface_dme_attr_get(intf, DME_DDBL1_MANUFACTURERID,
					&intf->ddbl1_manufacturer_id);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_DDBL1_PRODUCTID,
					&intf->ddbl1_product_id);
	if (ret)
		return ret;

	if (intf->ddbl1_manufacturer_id == TOSHIBA_DMID &&
	    intf->ddbl1_product_id == TOSHIBA_ES2_BRIDGE_DPID) {
		intf->quirks |= GB_INTERFACE_QUIRK_NO_GMP_IDS;
		intf->quirks |= GB_INTERFACE_QUIRK_NO_INIT_STATUS;
	}

	ret = gb_interface_read_ara_dme(intf);
	if (ret)
		return ret;

	intf->dme_read = true;

	return 0;
}

static int gb_interface_route_create(struct gb_interface *intf)
{
	struct gb_svc *svc = intf->hd->svc;
	u8 intf_id = intf->interface_id;
	u8 device_id;
	int ret;

	/* Allocate an interface device id. */
	ret = ida_simple_get(&svc->device_id_map,
			     GB_SVC_DEVICE_ID_MIN, GB_SVC_DEVICE_ID_MAX + 1,
			     GFP_KERNEL);
	if (ret < 0) {
		dev_err(&intf->dev, "failed to allocate device id: %d\n", ret);
		return ret;
	}
	device_id = ret;

	ret = gb_svc_intf_device_id(svc, intf_id, device_id);
	if (ret) {
		dev_err(&intf->dev, "failed to set device id %u: %d\n",
			device_id, ret);
		goto err_ida_remove;
	}

	/* FIXME: Hard-coded AP device id. */
	ret = gb_svc_route_create(svc, svc->ap_intf_id, GB_SVC_DEVICE_ID_AP,
				  intf_id, device_id);
	if (ret) {
		dev_err(&intf->dev, "failed to create route: %d\n", ret);
		goto err_svc_id_free;
	}

	intf->device_id = device_id;

	return 0;

err_svc_id_free:
	/*
	 * XXX Should we tell SVC that this id doesn't belong to interface
	 * XXX anymore.
	 */
err_ida_remove:
	ida_simple_remove(&svc->device_id_map, device_id);

	return ret;
}

static void gb_interface_route_destroy(struct gb_interface *intf)
{
	struct gb_svc *svc = intf->hd->svc;

	if (intf->device_id == GB_INTERFACE_DEVICE_ID_BAD)
		return;

	gb_svc_route_destroy(svc, svc->ap_intf_id, intf->interface_id);
	ida_simple_remove(&svc->device_id_map, intf->device_id);
	intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;
}

/* Locking: Caller holds the interface mutex. */
static int gb_interface_legacy_mode_switch(struct gb_interface *intf)
{
	int ret;

	dev_info(&intf->dev, "legacy mode switch detected\n");

	/* Mark as disconnected to prevent I/O during disable. */
	intf->disconnected = true;
	gb_interface_disable(intf);
	intf->disconnected = false;

	ret = gb_interface_enable(intf);
	if (ret) {
		dev_err(&intf->dev, "failed to re-enable interface: %d\n", ret);
		gb_interface_deactivate(intf);
	}

	return ret;
}

void gb_interface_mailbox_event(struct gb_interface *intf, u16 result,
				u32 mailbox)
{
	mutex_lock(&intf->mutex);

	if (result) {
		dev_warn(&intf->dev,
			 "mailbox event with UniPro error: 0x%04x\n",
			 result);
		goto err_disable;
	}

	if (mailbox != GB_SVC_INTF_MAILBOX_GREYBUS) {
		dev_warn(&intf->dev,
			 "mailbox event with unexpected value: 0x%08x\n",
			 mailbox);
		goto err_disable;
	}

	if (intf->quirks & GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH) {
		gb_interface_legacy_mode_switch(intf);
		goto out_unlock;
	}

	if (!intf->mode_switch) {
		dev_warn(&intf->dev, "unexpected mailbox event: 0x%08x\n",
			 mailbox);
		goto err_disable;
	}

	dev_info(&intf->dev, "mode switch detected\n");

	complete(&intf->mode_switch_completion);

out_unlock:
	mutex_unlock(&intf->mutex);

	return;

err_disable:
	gb_interface_disable(intf);
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);
}

static void gb_interface_mode_switch_work(struct work_struct *work)
{
	struct gb_interface *intf;
	struct gb_control *control;
	unsigned long timeout;
	int ret;

	intf = container_of(work, struct gb_interface, mode_switch_work);

	mutex_lock(&intf->mutex);
	/* Make sure interface is still enabled. */
	if (!intf->enabled) {
		dev_dbg(&intf->dev, "mode switch aborted\n");
		intf->mode_switch = false;
		mutex_unlock(&intf->mutex);
		goto out_interface_put;
	}

	/*
	 * Prepare the control device for mode switch and make sure to get an
	 * extra reference before it goes away during interface disable.
	 */
	control = gb_control_get(intf->control);
	gb_control_mode_switch_prepare(control);
	gb_interface_disable(intf);
	mutex_unlock(&intf->mutex);

	timeout = msecs_to_jiffies(GB_INTERFACE_MODE_SWITCH_TIMEOUT);
	ret = wait_for_completion_interruptible_timeout(
			&intf->mode_switch_completion, timeout);

	/* Finalise control-connection mode switch. */
	gb_control_mode_switch_complete(control);
	gb_control_put(control);

	if (ret < 0) {
		dev_err(&intf->dev, "mode switch interrupted\n");
		goto err_deactivate;
	} else if (ret == 0) {
		dev_err(&intf->dev, "mode switch timed out\n");
		goto err_deactivate;
	}

	/* Re-enable (re-enumerate) interface if still active. */
	mutex_lock(&intf->mutex);
	intf->mode_switch = false;
	if (intf->active) {
		ret = gb_interface_enable(intf);
		if (ret) {
			dev_err(&intf->dev, "failed to re-enable interface: %d\n",
				ret);
			gb_interface_deactivate(intf);
		}
	}
	mutex_unlock(&intf->mutex);

out_interface_put:
	gb_interface_put(intf);

	return;

err_deactivate:
	mutex_lock(&intf->mutex);
	intf->mode_switch = false;
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);

	gb_interface_put(intf);
}

int gb_interface_request_mode_switch(struct gb_interface *intf)
{
	int ret = 0;

	mutex_lock(&intf->mutex);
	if (intf->mode_switch) {
		ret = -EBUSY;
		goto out_unlock;
	}

	intf->mode_switch = true;
	reinit_completion(&intf->mode_switch_completion);

	/*
	 * Get a reference to the interface device, which will be put once the
	 * mode switch is complete.
	 */
	get_device(&intf->dev);

	if (!queue_work(system_long_wq, &intf->mode_switch_work)) {
		put_device(&intf->dev);
		ret = -EBUSY;
		goto out_unlock;
	}

out_unlock:
	mutex_unlock(&intf->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_interface_request_mode_switch);
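
/*
 * Hypothetical usage sketch (not part of this file): a bundle driver that has
 * just pushed new firmware to a module could request re-enumeration roughly
 * like this, assuming it holds a gb_connection:
 *
 *	ret = gb_interface_request_mode_switch(connection->intf);
 *	if (ret)
 *		dev_err(&connection->bundle->dev,
 *			"failed to request mode switch: %d\n", ret);
 *
 * The switch itself then runs asynchronously in
 * gb_interface_mode_switch_work() once the mode-switch mailbox event arrives.
 */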

/*
 * T_TstSrcIncrement is written by the module on ES2 as a stand-in for the
 * init-status attribute DME_TOSHIBA_GMP_INIT_STATUS. The AP needs to read and
 * clear it after reading a non-zero value from it.
 *
 * FIXME: This is module-hardware dependent and needs to be extended for every
 * type of module we want to support.
 */
static int gb_interface_read_and_clear_init_status(struct gb_interface *intf)
{
	struct gb_host_device *hd = intf->hd;
	unsigned long bootrom_quirks;
	unsigned long s2l_quirks;
	int ret;
	u32 value;
	u16 attr;
	u8 init_status;

	/*
	 * ES2 bridges use T_TstSrcIncrement for the init status.
	 *
	 * FIXME: Remove ES2 support
	 */
	if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS)
		attr = DME_T_TST_SRC_INCREMENT;
	else
		attr = DME_TOSHIBA_GMP_INIT_STATUS;

	ret = gb_svc_dme_peer_get(hd->svc, intf->interface_id, attr,
				  DME_SELECTOR_INDEX_NULL, &value);
	if (ret)
		return ret;

	/*
	 * A nonzero init status indicates the module has finished
	 * initializing.
	 */
	if (!value) {
		dev_err(&intf->dev, "invalid init status\n");
		return -ENODEV;
	}

	/*
	 * Extract the init status.
	 *
	 * For ES2: We need to check lowest 8 bits of 'value'.
	 * For ES3: We need to check highest 8 bits out of 32 of 'value'.
	 *
	 * FIXME: Remove ES2 support
	 */
	if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS)
		init_status = value & 0xff;
	else
		init_status = value >> 24;

	/*
	 * Check if the interface is executing the quirky ES3 bootrom that,
	 * for example, requires E2EFC, CSD and CSV to be disabled.
	 */
	bootrom_quirks = GB_INTERFACE_QUIRK_NO_CPORT_FEATURES |
			 GB_INTERFACE_QUIRK_FORCED_DISABLE |
			 GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH |
			 GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE;

	s2l_quirks = GB_INTERFACE_QUIRK_NO_PM;

	switch (init_status) {
	case GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED:
	case GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED:
		intf->quirks |= bootrom_quirks;
		break;
	case GB_INIT_S2_LOADER_BOOT_STARTED:
		/* S2 Loader doesn't support runtime PM */
		intf->quirks &= ~bootrom_quirks;
		intf->quirks |= s2l_quirks;
		break;
	default:
		intf->quirks &= ~bootrom_quirks;
		intf->quirks &= ~s2l_quirks;
	}

	/* Clear the init status. */
	return gb_svc_dme_peer_set(hd->svc, intf->interface_id, attr,
				   DME_SELECTOR_INDEX_NULL, 0);
}

/* interface sysfs attributes */
#define gb_interface_attr(field, type)					\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_interface *intf = to_gb_interface(dev);		\
	return scnprintf(buf, PAGE_SIZE, type"\n", intf->field);	\
}									\
static DEVICE_ATTR_RO(field)
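
/*
 * For illustration only: gb_interface_attr(interface_id, "%u") below expands
 * to roughly the following (a sketch, not literal preprocessor output):
 *
 *	static ssize_t interface_id_show(struct device *dev,
 *					 struct device_attribute *attr,
 *					 char *buf)
 *	{
 *		struct gb_interface *intf = to_gb_interface(dev);
 *		return scnprintf(buf, PAGE_SIZE, "%u\n", intf->interface_id);
 *	}
 *	static DEVICE_ATTR_RO(interface_id);
 */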

gb_interface_attr(ddbl1_manufacturer_id, "0x%08x");
gb_interface_attr(ddbl1_product_id, "0x%08x");
gb_interface_attr(interface_id, "%u");
gb_interface_attr(vendor_id, "0x%08x");
gb_interface_attr(product_id, "0x%08x");
gb_interface_attr(serial_number, "0x%016llx");

static ssize_t voltage_now_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);
	int ret;
	u32 measurement;

	ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
					    GB_SVC_PWRMON_TYPE_VOL,
					    &measurement);
	if (ret) {
		dev_err(&intf->dev, "failed to get voltage sample (%d)\n", ret);
		return ret;
	}

	return sprintf(buf, "%u\n", measurement);
}
static DEVICE_ATTR_RO(voltage_now);

static ssize_t current_now_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);
	int ret;
	u32 measurement;

	ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
					    GB_SVC_PWRMON_TYPE_CURR,
					    &measurement);
	if (ret) {
		dev_err(&intf->dev, "failed to get current sample (%d)\n", ret);
		return ret;
	}

	return sprintf(buf, "%u\n", measurement);
}
static DEVICE_ATTR_RO(current_now);

static ssize_t power_now_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);
	int ret;
	u32 measurement;

	ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
					    GB_SVC_PWRMON_TYPE_PWR,
					    &measurement);
	if (ret) {
		dev_err(&intf->dev, "failed to get power sample (%d)\n", ret);
		return ret;
	}

	return sprintf(buf, "%u\n", measurement);
}
static DEVICE_ATTR_RO(power_now);

static ssize_t power_state_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);

	if (intf->active)
		return scnprintf(buf, PAGE_SIZE, "on\n");
	else
		return scnprintf(buf, PAGE_SIZE, "off\n");
}

static ssize_t power_state_store(struct device *dev,
				 struct device_attribute *attr, const char *buf,
				 size_t len)
{
	struct gb_interface *intf = to_gb_interface(dev);
	bool activate;
	int ret = 0;

	if (kstrtobool(buf, &activate))
		return -EINVAL;

	mutex_lock(&intf->mutex);

	if (activate == intf->active)
		goto unlock;

	if (activate) {
		ret = gb_interface_activate(intf);
		if (ret) {
			dev_err(&intf->dev,
				"failed to activate interface: %d\n", ret);
			goto unlock;
		}

		ret = gb_interface_enable(intf);
		if (ret) {
			dev_err(&intf->dev,
				"failed to enable interface: %d\n", ret);
			gb_interface_deactivate(intf);
			goto unlock;
		}
	} else {
		gb_interface_disable(intf);
		gb_interface_deactivate(intf);
	}

unlock:
	mutex_unlock(&intf->mutex);

	if (ret)
		return ret;

	return len;
}
static DEVICE_ATTR_RW(power_state);
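
/*
 * Example (assumed sysfs path, for illustration): writing "off" or "on" to
 * the power_state attribute deactivates or (re)activates the interface from
 * user space:
 *
 *	echo off > /sys/bus/greybus/devices/<interface>/power_state
 *
 * kstrtobool() also accepts the "0"/"1" and "y"/"n" spellings.
 */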

static const char *gb_interface_type_string(struct gb_interface *intf)
{
	static const char * const types[] = {
		[GB_INTERFACE_TYPE_INVALID] = "invalid",
		[GB_INTERFACE_TYPE_UNKNOWN] = "unknown",
		[GB_INTERFACE_TYPE_DUMMY] = "dummy",
		[GB_INTERFACE_TYPE_UNIPRO] = "unipro",
		[GB_INTERFACE_TYPE_GREYBUS] = "greybus",
	};

	return types[intf->type];
}

static ssize_t interface_type_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);

	return sprintf(buf, "%s\n", gb_interface_type_string(intf));
}
static DEVICE_ATTR_RO(interface_type);

static struct attribute *interface_unipro_attrs[] = {
	&dev_attr_ddbl1_manufacturer_id.attr,
	&dev_attr_ddbl1_product_id.attr,
	NULL
};

static struct attribute *interface_greybus_attrs[] = {
	&dev_attr_vendor_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_serial_number.attr,
	NULL
};

static struct attribute *interface_power_attrs[] = {
	&dev_attr_voltage_now.attr,
	&dev_attr_current_now.attr,
	&dev_attr_power_now.attr,
	&dev_attr_power_state.attr,
	NULL
};

static struct attribute *interface_common_attrs[] = {
	&dev_attr_interface_id.attr,
	&dev_attr_interface_type.attr,
	NULL
};

static umode_t interface_unipro_is_visible(struct kobject *kobj,
					   struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct gb_interface *intf = to_gb_interface(dev);

	switch (intf->type) {
	case GB_INTERFACE_TYPE_UNIPRO:
	case GB_INTERFACE_TYPE_GREYBUS:
		return attr->mode;
	default:
		return 0;
	}
}

static umode_t interface_greybus_is_visible(struct kobject *kobj,
					    struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct gb_interface *intf = to_gb_interface(dev);

	switch (intf->type) {
	case GB_INTERFACE_TYPE_GREYBUS:
		return attr->mode;
	default:
		return 0;
	}
}

static umode_t interface_power_is_visible(struct kobject *kobj,
					  struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct gb_interface *intf = to_gb_interface(dev);

	switch (intf->type) {
	case GB_INTERFACE_TYPE_UNIPRO:
	case GB_INTERFACE_TYPE_GREYBUS:
		return attr->mode;
	default:
		return 0;
	}
}

static const struct attribute_group interface_unipro_group = {
	.is_visible = interface_unipro_is_visible,
	.attrs = interface_unipro_attrs,
};

static const struct attribute_group interface_greybus_group = {
	.is_visible = interface_greybus_is_visible,
	.attrs = interface_greybus_attrs,
};

static const struct attribute_group interface_power_group = {
	.is_visible = interface_power_is_visible,
	.attrs = interface_power_attrs,
};

static const struct attribute_group interface_common_group = {
	.attrs = interface_common_attrs,
};

static const struct attribute_group *interface_groups[] = {
	&interface_unipro_group,
	&interface_greybus_group,
	&interface_power_group,
	&interface_common_group,
	NULL
};

static void gb_interface_release(struct device *dev)
{
	struct gb_interface *intf = to_gb_interface(dev);

	trace_gb_interface_release(intf);

	kfree(intf);
}

#ifdef CONFIG_PM
static int gb_interface_suspend(struct device *dev)
{
	struct gb_interface *intf = to_gb_interface(dev);
	int ret, timesync_ret;

	ret = gb_control_interface_suspend_prepare(intf->control);
	if (ret)
		return ret;

	gb_timesync_interface_remove(intf);

	ret = gb_control_suspend(intf->control);
	if (ret)
		goto err_hibernate_abort;

	ret = gb_interface_hibernate_link(intf);
	if (ret)
		return ret;

	/* Delay to allow interface to enter standby before disabling refclk */
	msleep(GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS);

	ret = gb_interface_refclk_set(intf, false);
	if (ret)
		return ret;

	return 0;

err_hibernate_abort:
	gb_control_interface_hibernate_abort(intf->control);

	timesync_ret = gb_timesync_interface_add(intf);
	if (timesync_ret) {
		dev_err(dev, "failed to add to timesync: %d\n", timesync_ret);
		return timesync_ret;
	}

	return ret;
}

static int gb_interface_resume(struct device *dev)
{
	struct gb_interface *intf = to_gb_interface(dev);
	struct gb_svc *svc = intf->hd->svc;
	int ret;

	ret = gb_interface_refclk_set(intf, true);
	if (ret)
		return ret;

	ret = gb_svc_intf_resume(svc, intf->interface_id);
	if (ret)
		return ret;

	ret = gb_control_resume(intf->control);
	if (ret)
		return ret;

	ret = gb_timesync_interface_add(intf);
	if (ret) {
		dev_err(dev, "failed to add to timesync: %d\n", ret);
		return ret;
	}

	ret = gb_timesync_schedule_synchronous(intf);
	if (ret) {
		dev_err(dev, "failed to synchronize FrameTime: %d\n", ret);
		return ret;
	}

	return 0;
}

static int gb_interface_runtime_idle(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_request_autosuspend(dev);

	return 0;
}
#endif

static const struct dev_pm_ops gb_interface_pm_ops = {
	SET_RUNTIME_PM_OPS(gb_interface_suspend, gb_interface_resume,
			   gb_interface_runtime_idle)
};

struct device_type greybus_interface_type = {
	.name = "greybus_interface",
	.release = gb_interface_release,
	.pm = &gb_interface_pm_ops,
};

/*
 * A Greybus module represents a user-replaceable component on a GMP
 * phone. An interface is the physical connection on that module. A
 * module may have more than one interface.
 *
 * Create a gb_interface structure to represent a discovered interface.
 * The position of the interface within the Endo is encoded in the
 * "interface_id" argument.
 *
 * Returns a pointer to the new interface or a null pointer if a
 * failure occurs due to memory exhaustion.
 */
struct gb_interface *gb_interface_create(struct gb_module *module,
					 u8 interface_id)
{
	struct gb_host_device *hd = module->hd;
	struct gb_interface *intf;

	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
	if (!intf)
		return NULL;

	intf->hd = hd;		/* XXX refcount? */
	intf->module = module;
	intf->interface_id = interface_id;
	INIT_LIST_HEAD(&intf->bundles);
	INIT_LIST_HEAD(&intf->manifest_descs);
	mutex_init(&intf->mutex);
	INIT_WORK(&intf->mode_switch_work, gb_interface_mode_switch_work);
	init_completion(&intf->mode_switch_completion);

	/* Invalid device id to start with */
	intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;

	intf->dev.parent = &module->dev;
	intf->dev.bus = &greybus_bus_type;
	intf->dev.type = &greybus_interface_type;
	intf->dev.groups = interface_groups;
	intf->dev.dma_mask = module->dev.dma_mask;
	device_initialize(&intf->dev);
	dev_set_name(&intf->dev, "%s.%u", dev_name(&module->dev),
		     interface_id);

	pm_runtime_set_autosuspend_delay(&intf->dev,
					 GB_INTERFACE_AUTOSUSPEND_MS);

	trace_gb_interface_create(intf);

	return intf;
}

static int gb_interface_vsys_set(struct gb_interface *intf, bool enable)
{
	struct gb_svc *svc = intf->hd->svc;
	int ret;

	dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);

	ret = gb_svc_intf_vsys_set(svc, intf->interface_id, enable);
	if (ret) {
		dev_err(&intf->dev, "failed to set v_sys: %d\n", ret);
		return ret;
	}

	return 0;
}

static int gb_interface_refclk_set(struct gb_interface *intf, bool enable)
{
	struct gb_svc *svc = intf->hd->svc;
	int ret;

	dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);

	ret = gb_svc_intf_refclk_set(svc, intf->interface_id, enable);
	if (ret) {
		dev_err(&intf->dev, "failed to set refclk: %d\n", ret);
		return ret;
	}

	return 0;
}

static int gb_interface_unipro_set(struct gb_interface *intf, bool enable)
{
	struct gb_svc *svc = intf->hd->svc;
	int ret;

	dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);

	ret = gb_svc_intf_unipro_set(svc, intf->interface_id, enable);
	if (ret) {
		dev_err(&intf->dev, "failed to set UniPro: %d\n", ret);
		return ret;
	}

	return 0;
}

static int gb_interface_activate_operation(struct gb_interface *intf,
					   enum gb_interface_type *intf_type)
{
	struct gb_svc *svc = intf->hd->svc;
	u8 type;
	int ret;

	dev_dbg(&intf->dev, "%s\n", __func__);

	ret = gb_svc_intf_activate(svc, intf->interface_id, &type);
	if (ret) {
		dev_err(&intf->dev, "failed to activate: %d\n", ret);
		return ret;
	}

	switch (type) {
	case GB_SVC_INTF_TYPE_DUMMY:
		*intf_type = GB_INTERFACE_TYPE_DUMMY;
		/* FIXME: handle as an error for now */
		return -ENODEV;
	case GB_SVC_INTF_TYPE_UNIPRO:
		*intf_type = GB_INTERFACE_TYPE_UNIPRO;
		dev_err(&intf->dev, "interface type UniPro not supported\n");
		/* FIXME: handle as an error for now */
		return -ENODEV;
	case GB_SVC_INTF_TYPE_GREYBUS:
		*intf_type = GB_INTERFACE_TYPE_GREYBUS;
		break;
	default:
		dev_err(&intf->dev, "unknown interface type: %u\n", type);
		*intf_type = GB_INTERFACE_TYPE_UNKNOWN;
		return -ENODEV;
	}

	return 0;
}

static int gb_interface_hibernate_link(struct gb_interface *intf)
{
	struct gb_svc *svc = intf->hd->svc;

	return gb_svc_intf_set_power_mode_hibernate(svc, intf->interface_id);
}

static int _gb_interface_activate(struct gb_interface *intf,
				  enum gb_interface_type *type)
{
	int ret;

	*type = GB_INTERFACE_TYPE_UNKNOWN;

	if (intf->ejected || intf->removed)
		return -ENODEV;

	ret = gb_interface_vsys_set(intf, true);
	if (ret)
		return ret;

	ret = gb_interface_refclk_set(intf, true);
	if (ret)
		goto err_vsys_disable;

	ret = gb_interface_unipro_set(intf, true);
	if (ret)
		goto err_refclk_disable;

	ret = gb_interface_activate_operation(intf, type);
	if (ret) {
		switch (*type) {
		case GB_INTERFACE_TYPE_UNIPRO:
		case GB_INTERFACE_TYPE_GREYBUS:
			goto err_hibernate_link;
		default:
			goto err_unipro_disable;
		}
	}

	ret = gb_interface_read_dme(intf);
	if (ret)
		goto err_hibernate_link;

	ret = gb_interface_route_create(intf);
	if (ret)
		goto err_hibernate_link;

	intf->active = true;

	trace_gb_interface_activate(intf);

	return 0;

err_hibernate_link:
	gb_interface_hibernate_link(intf);
err_unipro_disable:
	gb_interface_unipro_set(intf, false);
err_refclk_disable:
	gb_interface_refclk_set(intf, false);
err_vsys_disable:
	gb_interface_vsys_set(intf, false);

	return ret;
}

/*
 * At present, we assume a UniPro-only module to be a Greybus module that
 * failed to send its mailbox poke. There is some reason to believe that this
 * is because of a bug in the ES3 bootrom.
 *
 * FIXME: Check if this is a Toshiba bridge before retrying?
 */
static int _gb_interface_activate_es3_hack(struct gb_interface *intf,
					   enum gb_interface_type *type)
{
	int retries = 3;
	int ret;

	while (retries--) {
		ret = _gb_interface_activate(intf, type);
		if (ret == -ENODEV && *type == GB_INTERFACE_TYPE_UNIPRO)
			continue;

		break;
	}

	return ret;
}

/*
 * Activate an interface.
 *
 * Locking: Caller holds the interface mutex.
 */
int gb_interface_activate(struct gb_interface *intf)
{
	enum gb_interface_type type;
	int ret;

	switch (intf->type) {
	case GB_INTERFACE_TYPE_INVALID:
	case GB_INTERFACE_TYPE_GREYBUS:
		ret = _gb_interface_activate_es3_hack(intf, &type);
		break;
	default:
		ret = _gb_interface_activate(intf, &type);
	}

	/* Make sure type is detected correctly during reactivation. */
	if (intf->type != GB_INTERFACE_TYPE_INVALID) {
		if (type != intf->type) {
			dev_err(&intf->dev, "failed to detect interface type\n");

			if (!ret)
				gb_interface_deactivate(intf);

			return -EIO;
		}
	} else {
		intf->type = type;
	}

	return ret;
}

/*
 * Deactivate an interface.
 *
 * Locking: Caller holds the interface mutex.
 */
void gb_interface_deactivate(struct gb_interface *intf)
{
	if (!intf->active)
		return;

	trace_gb_interface_deactivate(intf);

	/* Abort any ongoing mode switch. */
	if (intf->mode_switch)
		complete(&intf->mode_switch_completion);

	gb_interface_route_destroy(intf);
	gb_interface_hibernate_link(intf);
	gb_interface_unipro_set(intf, false);
	gb_interface_refclk_set(intf, false);
	gb_interface_vsys_set(intf, false);

	intf->active = false;
}

/*
 * Enable an interface by enabling its control connection, fetching the
 * manifest and other information over it, and finally registering its child
 * devices.
 *
 * Locking: Caller holds the interface mutex.
 */
int gb_interface_enable(struct gb_interface *intf)
{
	struct gb_control *control;
	struct gb_bundle *bundle, *tmp;
	int ret, size;
	void *manifest;

	ret = gb_interface_read_and_clear_init_status(intf);
	if (ret) {
		dev_err(&intf->dev, "failed to clear init status: %d\n", ret);
		return ret;
	}

	/* Establish control connection */
	control = gb_control_create(intf);
	if (IS_ERR(control)) {
		dev_err(&intf->dev, "failed to create control device: %ld\n",
			PTR_ERR(control));
		return PTR_ERR(control);
	}
	intf->control = control;

	ret = gb_control_enable(intf->control);
	if (ret)
		goto err_put_control;

	/* Get manifest size using control protocol on CPort */
	size = gb_control_get_manifest_size_operation(intf);
	if (size <= 0) {
		dev_err(&intf->dev, "failed to get manifest size: %d\n", size);

		if (size)
			ret = size;
		else
			ret = -EINVAL;

		goto err_disable_control;
	}

	manifest = kmalloc(size, GFP_KERNEL);
	if (!manifest) {
		ret = -ENOMEM;
		goto err_disable_control;
	}

	/* Get manifest using control protocol on CPort */
	ret = gb_control_get_manifest_operation(intf, manifest, size);
	if (ret) {
		dev_err(&intf->dev, "failed to get manifest: %d\n", ret);
		goto err_free_manifest;
	}

	/*
	 * Parse the manifest and build up our data structures representing
	 * what's in it.
	 */
	if (!gb_manifest_parse(intf, manifest, size)) {
		dev_err(&intf->dev, "failed to parse manifest\n");
		ret = -EINVAL;
		goto err_destroy_bundles;
	}

	ret = gb_control_get_bundle_versions(intf->control);
	if (ret)
		goto err_destroy_bundles;

	ret = gb_timesync_interface_add(intf);
	if (ret) {
		dev_err(&intf->dev, "failed to add to timesync: %d\n", ret);
		goto err_destroy_bundles;
	}

	/* Register the control device and any bundles */
	ret = gb_control_add(intf->control);
	if (ret)
		goto err_remove_timesync;

	pm_runtime_use_autosuspend(&intf->dev);
	pm_runtime_get_noresume(&intf->dev);
	pm_runtime_set_active(&intf->dev);
	pm_runtime_enable(&intf->dev);

	list_for_each_entry_safe_reverse(bundle, tmp, &intf->bundles, links) {
		ret = gb_bundle_add(bundle);
		if (ret) {
			gb_bundle_destroy(bundle);
			continue;
		}
	}

	kfree(manifest);

	intf->enabled = true;

	pm_runtime_put(&intf->dev);

	trace_gb_interface_enable(intf);

	return 0;

err_remove_timesync:
	gb_timesync_interface_remove(intf);
err_destroy_bundles:
	list_for_each_entry_safe(bundle, tmp, &intf->bundles, links)
		gb_bundle_destroy(bundle);
err_free_manifest:
	kfree(manifest);
err_disable_control:
	gb_control_disable(intf->control);
err_put_control:
	gb_control_put(intf->control);
	intf->control = NULL;

	return ret;
}

/*
 * Disable an interface and destroy its bundles.
 *
 * Locking: Caller holds the interface mutex.
 */
void gb_interface_disable(struct gb_interface *intf)
{
	struct gb_bundle *bundle;
	struct gb_bundle *next;

	if (!intf->enabled)
		return;

	trace_gb_interface_disable(intf);

	pm_runtime_get_sync(&intf->dev);

	/* Set disconnected flag to avoid I/O during connection tear down. */
	if (intf->quirks & GB_INTERFACE_QUIRK_FORCED_DISABLE)
		intf->disconnected = true;

	list_for_each_entry_safe(bundle, next, &intf->bundles, links)
		gb_bundle_destroy(bundle);

	if (!intf->mode_switch && !intf->disconnected)
		gb_control_interface_deactivate_prepare(intf->control);

	gb_control_del(intf->control);
	gb_timesync_interface_remove(intf);
	gb_control_disable(intf->control);
	gb_control_put(intf->control);
	intf->control = NULL;

	intf->enabled = false;

	pm_runtime_disable(&intf->dev);
	pm_runtime_set_suspended(&intf->dev);
	pm_runtime_dont_use_autosuspend(&intf->dev);
	pm_runtime_put_noidle(&intf->dev);
}

/* Enable TimeSync on an Interface control connection. */
int gb_interface_timesync_enable(struct gb_interface *intf, u8 count,
				 u64 frame_time, u32 strobe_delay, u32 refclk)
{
	return gb_control_timesync_enable(intf->control, count,
					  frame_time, strobe_delay,
					  refclk);
}

/* Disable TimeSync on an Interface control connection. */
int gb_interface_timesync_disable(struct gb_interface *intf)
{
	return gb_control_timesync_disable(intf->control);
}

/* Transmit the Authoritative FrameTime via an Interface control connection. */
int gb_interface_timesync_authoritative(struct gb_interface *intf,
					u64 *frame_time)
{
	return gb_control_timesync_authoritative(intf->control,
						 frame_time);
}

/* Register an interface. */
int gb_interface_add(struct gb_interface *intf)
{
	int ret;

	ret = device_add(&intf->dev);
	if (ret) {
		dev_err(&intf->dev, "failed to register interface: %d\n", ret);
		return ret;
	}

	trace_gb_interface_add(intf);

	dev_info(&intf->dev, "Interface added (%s)\n",
		 gb_interface_type_string(intf));

	switch (intf->type) {
	case GB_INTERFACE_TYPE_GREYBUS:
		dev_info(&intf->dev, "GMP VID=0x%08x, PID=0x%08x\n",
			 intf->vendor_id, intf->product_id);
		/* fall-through */
	case GB_INTERFACE_TYPE_UNIPRO:
		dev_info(&intf->dev, "DDBL1 Manufacturer=0x%08x, Product=0x%08x\n",
			 intf->ddbl1_manufacturer_id,
			 intf->ddbl1_product_id);
		break;
	default:
		break;
	}

	return 0;
}

/* Deregister an interface. */
void gb_interface_del(struct gb_interface *intf)
{
	if (device_is_registered(&intf->dev)) {
		trace_gb_interface_del(intf);

		device_del(&intf->dev);
		dev_info(&intf->dev, "Interface removed\n");
	}
}

void gb_interface_put(struct gb_interface *intf)
{
	put_device(&intf->dev);
}