timesync.c 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358
  1. /*
  2. * TimeSync API driver.
  3. *
  4. * Copyright 2016 Google Inc.
  5. * Copyright 2016 Linaro Ltd.
  6. *
  7. * Released under the GPLv2 only.
  8. */
  9. #include <linux/debugfs.h>
  10. #include <linux/hrtimer.h>
  11. #include "greybus.h"
  12. #include "timesync.h"
  13. #include "greybus_trace.h"
/*
 * Minimum inter-strobe value of one millisecond is chosen because it
 * just-about fits the common definition of a jiffy.
 *
 * Maximum value OTOH is constrained by the number of bits the SVC can fit
 * into a 16 bit up-counter. The SVC configures the timer in microseconds
 * so the maximum allowable value is 65535 microseconds. We clip that value
 * to 10000 microseconds for the sake of using nice round base 10 numbers
 * and since right-now there's no imaginable use-case requiring anything
 * other than a one millisecond inter-strobe time, let alone something
 * higher than ten milliseconds.
 */
#define GB_TIMESYNC_STROBE_DELAY_US		1000
#define GB_TIMESYNC_DEFAULT_OFFSET_US		1000

/* Work queue timers long, short and SVC strobe timeout */
#define GB_TIMESYNC_DELAYED_WORK_LONG		msecs_to_jiffies(10)
#define GB_TIMESYNC_DELAYED_WORK_SHORT		msecs_to_jiffies(1)
#define GB_TIMESYNC_MAX_WAIT_SVC		msecs_to_jiffies(5000)
#define GB_TIMESYNC_KTIME_UPDATE		msecs_to_jiffies(1000)
/* Window (seconds) over which FrameTime->ktime conversion stays accurate */
#define GB_TIMESYNC_MAX_KTIME_CONVERSION	15

/* Maximum number of times we'll retry a failed synchronous sync */
#define GB_TIMESYNC_MAX_RETRIES			5

/* Reported nanoseconds/femtoseconds per clock */
static u64 gb_timesync_ns_per_clock;
static u64 gb_timesync_fs_per_clock;

/* Maximum difference we will accept converting FrameTime to ktime */
static u32 gb_timesync_max_ktime_diff;

/* Reported clock rate */
static unsigned long gb_timesync_clock_rate;

/* Workqueue */
static void gb_timesync_worker(struct work_struct *work);

/* List of SVCs with one FrameTime per SVC */
static LIST_HEAD(gb_timesync_svc_list);

/* Synchronize parallel contexts accessing a valid timesync_svc pointer */
static DEFINE_MUTEX(gb_timesync_svc_list_mutex);
/* Structure to convert from FrameTime to timespec/ktime */
struct gb_timesync_frame_time_data {
	u64 frame_time;		/* FrameTime counter value */
	struct timespec ts;	/* ktime captured at that FrameTime */
};
/* Per-SVC TimeSync state: one instance per SVC on gb_timesync_svc_list */
struct gb_timesync_svc {
	struct list_head list;			/* link on gb_timesync_svc_list */
	struct list_head interface_list;	/* participating interfaces */
	struct gb_svc *svc;
	struct gb_timesync_host_device *timesync_hd;

	spinlock_t spinlock;	/* Per SVC spinlock to sync with ISR */
	struct mutex mutex;	/* Per SVC mutex for regular synchronization */

	struct dentry *frame_time_dentry;	/* debugfs entries */
	struct dentry *frame_ktime_dentry;
	struct workqueue_struct *work_queue;
	wait_queue_head_t wait_queue;
	struct delayed_work delayed_work;
	struct timer_list ktime_timer;

	/* The current local FrameTime */
	u64 frame_time_offset;
	/* Strobe samples and the FrameTime/ktime reference pair */
	struct gb_timesync_frame_time_data strobe_data[GB_TIMESYNC_MAX_STROBES];
	struct gb_timesync_frame_time_data ktime_data;

	/* The SVC FrameTime and relative AP FrameTime @ last TIMESYNC_PING */
	u64 svc_ping_frame_time;
	u64 ap_ping_frame_time;

	/* Transitory settings */
	u32 strobe_mask;	/* wake pins to strobe */
	bool offset_down;	/* subtract (vs add) frame_time_offset */
	bool print_ping;	/* log the next ping result */
	bool capture_ping;	/* ISR should capture ping FrameTime */
	int strobe;		/* strobes received so far */

	/* Current state */
	int state;		/* enum gb_timesync_state */
};
/* Per-host-device (APB/GPB bridge) TimeSync bookkeeping */
struct gb_timesync_host_device {
	struct list_head list;
	struct gb_host_device *hd;
	u64 ping_frame_time;	/* FrameTime reported at last ping */
};
/* Per-interface TimeSync bookkeeping, linked on timesync_svc->interface_list */
struct gb_timesync_interface {
	struct list_head list;
	struct gb_interface *interface;
	u64 ping_frame_time;	/* FrameTime reported at last ping */
};
/*
 * TimeSync state machine. All transitions are funnelled through
 * gb_timesync_set_state(), which rejects illegal transitions.
 */
enum gb_timesync_state {
	GB_TIMESYNC_STATE_INVALID	= 0,	/* no valid timesync_svc */
	GB_TIMESYNC_STATE_INACTIVE	= 1,	/* synchronization torn down */
	GB_TIMESYNC_STATE_INIT		= 2,	/* (re)sync scheduled */
	GB_TIMESYNC_STATE_WAIT_SVC	= 3,	/* waiting for SVC strobes */
	GB_TIMESYNC_STATE_AUTHORITATIVE	= 4,	/* SVC FrameTime available */
	GB_TIMESYNC_STATE_PING		= 5,	/* verification ping scheduled */
	GB_TIMESYNC_STATE_ACTIVE	= 6,	/* FrameTime synchronized */
};

static void gb_timesync_ktime_timer_fn(unsigned long data);
  103. static u64 gb_timesync_adjust_count(struct gb_timesync_svc *timesync_svc,
  104. u64 counts)
  105. {
  106. if (timesync_svc->offset_down)
  107. return counts - timesync_svc->frame_time_offset;
  108. else
  109. return counts + timesync_svc->frame_time_offset;
  110. }
  111. /*
  112. * This function provides the authoritative FrameTime to a calling function. It
  113. * is designed to be lockless and should remain that way the caller is assumed
  114. * to be state-aware.
  115. */
  116. static u64 __gb_timesync_get_frame_time(struct gb_timesync_svc *timesync_svc)
  117. {
  118. u64 clocks = gb_timesync_platform_get_counter();
  119. return gb_timesync_adjust_count(timesync_svc, clocks);
  120. }
/* Arm the worker as a timeout guard while waiting for SVC strobes */
static void gb_timesync_schedule_svc_timeout(struct gb_timesync_svc
					     *timesync_svc)
{
	queue_delayed_work(timesync_svc->work_queue,
			   &timesync_svc->delayed_work,
			   GB_TIMESYNC_MAX_WAIT_SVC);
}
/*
 * Single point of state transition. A requested transition that is not
 * legal from the current state leaves ->state untouched, which the final
 * WARN_ON() then reports. INVALID/INACTIVE may be entered from any state;
 * the remaining states enforce the INIT -> WAIT_SVC -> AUTHORITATIVE ->
 * ACTIVE <-> PING ordering. Callers must hold timesync_svc->spinlock
 * (see gb_timesync_set_state_atomic()).
 */
static void gb_timesync_set_state(struct gb_timesync_svc *timesync_svc,
				  int state)
{
	switch (state) {
	case GB_TIMESYNC_STATE_INVALID:
		timesync_svc->state = state;
		wake_up(&timesync_svc->wait_queue);
		break;
	case GB_TIMESYNC_STATE_INACTIVE:
		timesync_svc->state = state;
		wake_up(&timesync_svc->wait_queue);
		break;
	case GB_TIMESYNC_STATE_INIT:
		if (timesync_svc->state != GB_TIMESYNC_STATE_INVALID) {
			/* Restart a sync cycle from scratch */
			timesync_svc->strobe = 0;
			timesync_svc->frame_time_offset = 0;
			timesync_svc->state = state;
			cancel_delayed_work(&timesync_svc->delayed_work);
			queue_delayed_work(timesync_svc->work_queue,
					   &timesync_svc->delayed_work,
					   GB_TIMESYNC_DELAYED_WORK_LONG);
		}
		break;
	case GB_TIMESYNC_STATE_WAIT_SVC:
		if (timesync_svc->state == GB_TIMESYNC_STATE_INIT)
			timesync_svc->state = state;
		break;
	case GB_TIMESYNC_STATE_AUTHORITATIVE:
		if (timesync_svc->state == GB_TIMESYNC_STATE_WAIT_SVC) {
			timesync_svc->state = state;
			/* Replace the SVC timeout with immediate processing */
			cancel_delayed_work(&timesync_svc->delayed_work);
			queue_delayed_work(timesync_svc->work_queue,
					   &timesync_svc->delayed_work, 0);
		}
		break;
	case GB_TIMESYNC_STATE_PING:
		if (timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE) {
			timesync_svc->state = state;
			queue_delayed_work(timesync_svc->work_queue,
					   &timesync_svc->delayed_work,
					   GB_TIMESYNC_DELAYED_WORK_SHORT);
		}
		break;
	case GB_TIMESYNC_STATE_ACTIVE:
		if (timesync_svc->state == GB_TIMESYNC_STATE_AUTHORITATIVE ||
		    timesync_svc->state == GB_TIMESYNC_STATE_PING) {
			timesync_svc->state = state;
			wake_up(&timesync_svc->wait_queue);
		}
		break;
	}

	if (WARN_ON(timesync_svc->state != state)) {
		pr_err("Invalid state transition %d=>%d\n",
		       timesync_svc->state, state);
	}
}
/* Perform a state transition with the ISR-synchronizing spinlock held */
static void gb_timesync_set_state_atomic(struct gb_timesync_svc *timesync_svc,
					 int state)
{
	unsigned long flags;

	spin_lock_irqsave(&timesync_svc->spinlock, flags);
	gb_timesync_set_state(timesync_svc, state);
	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
}
  192. static u64 gb_timesync_diff(u64 x, u64 y)
  193. {
  194. if (x > y)
  195. return x - y;
  196. else
  197. return y - x;
  198. }
  199. static void gb_timesync_adjust_to_svc(struct gb_timesync_svc *svc,
  200. u64 svc_frame_time, u64 ap_frame_time)
  201. {
  202. if (svc_frame_time > ap_frame_time) {
  203. svc->frame_time_offset = svc_frame_time - ap_frame_time;
  204. svc->offset_down = false;
  205. } else {
  206. svc->frame_time_offset = ap_frame_time - svc_frame_time;
  207. svc->offset_down = true;
  208. }
  209. }
/*
 * Associate a FrameTime with a ktime timestamp represented as struct timespec
 * Requires the calling context to hold timesync_svc->mutex
 */
static void gb_timesync_store_ktime(struct gb_timesync_svc *timesync_svc,
				    struct timespec ts, u64 frame_time)
{
	timesync_svc->ktime_data.ts = ts;
	timesync_svc->ktime_data.frame_time = frame_time;
}
/*
 * Find the two pulses that best-match our expected inter-strobe gap and
 * then calculate the difference between the SVC time at the second pulse
 * to the local time at the second pulse.
 *
 * For the winning pair this derives the AP<->SVC offset and stores the
 * FrameTime/ktime reference pair used later by gb_timesync_to_timespec().
 */
static void gb_timesync_collate_frame_time(struct gb_timesync_svc *timesync_svc,
					   u64 *frame_time)
{
	int i = 0;
	u64 delta, ap_frame_time;
	u64 strobe_delay_ns = GB_TIMESYNC_STROBE_DELAY_US * NSEC_PER_USEC;
	u64 least = 0;

	for (i = 1; i < GB_TIMESYNC_MAX_STROBES; i++) {
		/* Local gap between consecutive strobes, in nanoseconds */
		delta = timesync_svc->strobe_data[i].frame_time -
			timesync_svc->strobe_data[i - 1].frame_time;
		delta *= gb_timesync_ns_per_clock;
		/* How far the measured gap is from the programmed gap */
		delta = gb_timesync_diff(delta, strobe_delay_ns);

		/*
		 * least == 0 doubles as the "no candidate yet" sentinel;
		 * a later pair with an exactly-zero delta would therefore
		 * also be accepted, which is benign.
		 */
		if (!least || delta < least) {
			least = delta;
			gb_timesync_adjust_to_svc(timesync_svc, frame_time[i],
						  timesync_svc->strobe_data[i].frame_time);

			/* Adjusted AP FrameTime at this strobe */
			ap_frame_time = timesync_svc->strobe_data[i].frame_time;
			ap_frame_time = gb_timesync_adjust_count(timesync_svc,
								 ap_frame_time);
			gb_timesync_store_ktime(timesync_svc,
						timesync_svc->strobe_data[i].ts,
						ap_frame_time);

			pr_debug("adjust %s local %llu svc %llu delta %llu\n",
				 timesync_svc->offset_down ? "down" : "up",
				 timesync_svc->strobe_data[i].frame_time,
				 frame_time[i], delta);
		}
	}
}
/*
 * Disable TimeSync on every participant (interfaces, host device, SVC),
 * release the wake pins and the bus lock, and drop to state INACTIVE.
 * Failures to disable individual participants are logged but do not stop
 * the teardown.
 */
static void gb_timesync_teardown(struct gb_timesync_svc *timesync_svc)
{
	struct gb_timesync_interface *timesync_interface;
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_interface *interface;
	struct gb_host_device *hd;
	int ret;

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		interface = timesync_interface->interface;
		ret = gb_interface_timesync_disable(interface);
		if (ret) {
			dev_err(&interface->dev,
				"interface timesync_disable %d\n", ret);
		}
	}

	hd = timesync_svc->timesync_hd->hd;
	ret = hd->driver->timesync_disable(hd);
	if (ret < 0) {
		dev_err(&hd->dev, "host timesync_disable %d\n",
			ret);
	}

	gb_svc_timesync_wake_pins_release(svc);
	gb_svc_timesync_disable(svc);
	gb_timesync_platform_unlock_bus();

	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);
}
/*
 * Handle a failure to take the platform bus lock. -EAGAIN re-enters the
 * current state, which (for the states that schedule work) re-arms the
 * delayed work and retries later; any other error abandons the attempt
 * and drops to INACTIVE.
 */
static void gb_timesync_platform_lock_bus_fail(struct gb_timesync_svc
					       *timesync_svc, int ret)
{
	if (ret == -EAGAIN) {
		gb_timesync_set_state(timesync_svc, timesync_svc->state);
	} else {
		pr_err("Failed to lock timesync bus %d\n", ret);
		gb_timesync_set_state(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);
	}
}
/*
 * Start a synchronization cycle: lock the bus, acquire the AP/SVC wake
 * pins, send timesync_enable to every interface and the host device,
 * move to WAIT_SVC, enable strobing on the SVC and finally arm a timeout
 * guarding SVC strobe completion. Runs from the worker in state INIT
 * with timesync_svc->mutex held.
 */
static void gb_timesync_enable(struct gb_timesync_svc *timesync_svc)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_interface *interface;
	u64 init_frame_time;
	unsigned long clock_rate = gb_timesync_clock_rate;
	int ret;

	/*
	 * Get access to the wake pins in the AP and SVC
	 * Release these pins either in gb_timesync_teardown() or in
	 * gb_timesync_authoritative()
	 */
	ret = gb_timesync_platform_lock_bus(timesync_svc);
	if (ret < 0) {
		gb_timesync_platform_lock_bus_fail(timesync_svc, ret);
		return;
	}
	ret = gb_svc_timesync_wake_pins_acquire(svc, timesync_svc->strobe_mask);
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_wake_pins_acquire %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}

	/* Choose an initial time in the future */
	init_frame_time = __gb_timesync_get_frame_time(timesync_svc) + 100000UL;

	/* Send enable command to all relevant participants */
	list_for_each_entry(timesync_interface, &timesync_svc->interface_list,
			    list) {
		interface = timesync_interface->interface;
		ret = gb_interface_timesync_enable(interface,
						   GB_TIMESYNC_MAX_STROBES,
						   init_frame_time,
						   GB_TIMESYNC_STROBE_DELAY_US,
						   clock_rate);
		if (ret) {
			dev_err(&interface->dev,
				"interface timesync_enable %d\n", ret);
		}
	}

	hd = timesync_svc->timesync_hd->hd;
	ret = hd->driver->timesync_enable(hd, GB_TIMESYNC_MAX_STROBES,
					  init_frame_time,
					  GB_TIMESYNC_STROBE_DELAY_US,
					  clock_rate);
	if (ret < 0) {
		dev_err(&hd->dev, "host timesync_enable %d\n",
			ret);
	}

	/* Must be in WAIT_SVC before the SVC starts strobing */
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_WAIT_SVC);
	ret = gb_svc_timesync_enable(svc, GB_TIMESYNC_MAX_STROBES,
				     init_frame_time,
				     GB_TIMESYNC_STROBE_DELAY_US,
				     clock_rate);
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_enable %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}

	/* Schedule a timeout waiting for SVC to complete strobing */
	gb_timesync_schedule_svc_timeout(timesync_svc);
}
/*
 * Fetch the authoritative strobe FrameTimes from the SVC, collate them
 * against the locally captured strobes to fix the AP<->SVC offset, push
 * the authoritative times downstream (host device, then interfaces),
 * release the wake pins/bus and move ACTIVE -> PING to verify the sync.
 * Runs from the worker in state AUTHORITATIVE with the mutex held.
 */
static void gb_timesync_authoritative(struct gb_timesync_svc *timesync_svc)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_interface *interface;
	u64 svc_frame_time[GB_TIMESYNC_MAX_STROBES];
	int ret;

	/* Get authoritative time from SVC and adjust local clock */
	ret = gb_svc_timesync_authoritative(svc, svc_frame_time);
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_authoritative %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}
	gb_timesync_collate_frame_time(timesync_svc, svc_frame_time);

	/* Transmit authoritative time to downstream slaves */
	hd = timesync_svc->timesync_hd->hd;
	ret = hd->driver->timesync_authoritative(hd, svc_frame_time);
	if (ret < 0)
		dev_err(&hd->dev, "host timesync_authoritative %d\n", ret);

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		interface = timesync_interface->interface;
		ret = gb_interface_timesync_authoritative(
						interface,
						svc_frame_time);
		if (ret) {
			dev_err(&interface->dev,
				"interface timesync_authoritative %d\n", ret);
		}
	}

	/* Release wake pins */
	gb_svc_timesync_wake_pins_release(svc);
	gb_timesync_platform_unlock_bus();

	/* Transition to state ACTIVE */
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_ACTIVE);

	/* Schedule a ping to verify the synchronized system time */
	timesync_svc->print_ping = true;
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_PING);
}
  398. static int __gb_timesync_get_status(struct gb_timesync_svc *timesync_svc)
  399. {
  400. int ret = -EINVAL;
  401. switch (timesync_svc->state) {
  402. case GB_TIMESYNC_STATE_INVALID:
  403. case GB_TIMESYNC_STATE_INACTIVE:
  404. ret = -ENODEV;
  405. break;
  406. case GB_TIMESYNC_STATE_INIT:
  407. case GB_TIMESYNC_STATE_WAIT_SVC:
  408. case GB_TIMESYNC_STATE_AUTHORITATIVE:
  409. ret = -EAGAIN;
  410. break;
  411. case GB_TIMESYNC_STATE_PING:
  412. case GB_TIMESYNC_STATE_ACTIVE:
  413. ret = 0;
  414. break;
  415. }
  416. return ret;
  417. }
  418. /*
  419. * This routine takes a FrameTime and derives the difference with-respect
  420. * to a reference FrameTime/ktime pair. It then returns the calculated
  421. * ktime based on the difference between the supplied FrameTime and
  422. * the reference FrameTime.
  423. *
  424. * The time difference is calculated to six decimal places. Taking 19.2MHz
  425. * as an example this means we have 52.083333~ nanoseconds per clock or
  426. * 52083333~ femtoseconds per clock.
  427. *
  428. * Naively taking the count difference and converting to
  429. * seconds/nanoseconds would quickly see the 0.0833 component produce
  430. * noticeable errors. For example a time difference of one second would
  431. * loose 19200000 * 0.08333x nanoseconds or 1.59 seconds.
  432. *
  433. * In contrast calculating in femtoseconds the same example of 19200000 *
  434. * 0.000000083333x nanoseconds per count of error is just 1.59 nanoseconds!
  435. *
  436. * Continuing the example of 19.2 MHz we cap the maximum error difference
  437. * at a worst-case 0.3 microseconds over a potential calculation window of
  438. * abount 15 seconds, meaning you can convert a FrameTime that is <= 15
  439. * seconds older/younger than the reference time with a maximum error of
  440. * 0.2385 useconds. Note 19.2MHz is an example frequency not a requirement.
  441. */
  442. static int gb_timesync_to_timespec(struct gb_timesync_svc *timesync_svc,
  443. u64 frame_time, struct timespec *ts)
  444. {
  445. unsigned long flags;
  446. u64 delta_fs, counts, sec, nsec;
  447. bool add;
  448. int ret = 0;
  449. memset(ts, 0x00, sizeof(*ts));
  450. mutex_lock(&timesync_svc->mutex);
  451. spin_lock_irqsave(&timesync_svc->spinlock, flags);
  452. ret = __gb_timesync_get_status(timesync_svc);
  453. if (ret)
  454. goto done;
  455. /* Support calculating ktime upwards or downwards from the reference */
  456. if (frame_time < timesync_svc->ktime_data.frame_time) {
  457. add = false;
  458. counts = timesync_svc->ktime_data.frame_time - frame_time;
  459. } else {
  460. add = true;
  461. counts = frame_time - timesync_svc->ktime_data.frame_time;
  462. }
  463. /* Enforce the .23 of a usecond boundary @ 19.2MHz */
  464. if (counts > gb_timesync_max_ktime_diff) {
  465. ret = -EINVAL;
  466. goto done;
  467. }
  468. /* Determine the time difference in femtoseconds */
  469. delta_fs = counts * gb_timesync_fs_per_clock;
  470. /* Convert to seconds */
  471. sec = delta_fs;
  472. do_div(sec, NSEC_PER_SEC);
  473. do_div(sec, 1000000UL);
  474. /* Get the nanosecond remainder */
  475. nsec = do_div(delta_fs, sec);
  476. do_div(nsec, 1000000UL);
  477. if (add) {
  478. /* Add the calculated offset - overflow nanoseconds upwards */
  479. ts->tv_sec = timesync_svc->ktime_data.ts.tv_sec + sec;
  480. ts->tv_nsec = timesync_svc->ktime_data.ts.tv_nsec + nsec;
  481. if (ts->tv_nsec >= NSEC_PER_SEC) {
  482. ts->tv_sec++;
  483. ts->tv_nsec -= NSEC_PER_SEC;
  484. }
  485. } else {
  486. /* Subtract the difference over/underflow as necessary */
  487. if (nsec > timesync_svc->ktime_data.ts.tv_nsec) {
  488. sec++;
  489. nsec = nsec + timesync_svc->ktime_data.ts.tv_nsec;
  490. nsec = do_div(nsec, NSEC_PER_SEC);
  491. } else {
  492. nsec = timesync_svc->ktime_data.ts.tv_nsec - nsec;
  493. }
  494. /* Cannot return a negative second value */
  495. if (sec > timesync_svc->ktime_data.ts.tv_sec) {
  496. ret = -EINVAL;
  497. goto done;
  498. }
  499. ts->tv_sec = timesync_svc->ktime_data.ts.tv_sec - sec;
  500. ts->tv_nsec = nsec;
  501. }
  502. done:
  503. spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
  504. mutex_unlock(&timesync_svc->mutex);
  505. return ret;
  506. }
/*
 * Format the raw ping FrameTime of every participant (AP, SVC, host
 * device, interfaces) into buf; returns the number of characters
 * accounted for (snprintf semantics: may exceed what was written on
 * truncation).
 *
 * NOTE(review): 'len = buflen - off' relies on unsigned wrap-around when
 * off exceeds buflen - the 'len < buflen' guards then fail and further
 * writes are skipped. Works, but worth confirming off can never be used
 * to index past the buffer.
 */
static size_t gb_timesync_log_frame_time(struct gb_timesync_svc *timesync_svc,
					 char *buf, size_t buflen)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_interface *interface;
	unsigned int len;
	size_t off;

	/* AP/SVC */
	off = snprintf(buf, buflen, "%s frametime: ap=%llu %s=%llu ",
		       greybus_bus_type.name,
		       timesync_svc->ap_ping_frame_time, dev_name(&svc->dev),
		       timesync_svc->svc_ping_frame_time);
	len = buflen - off;

	/* APB/GPB */
	if (len < buflen) {
		hd = timesync_svc->timesync_hd->hd;
		off += snprintf(&buf[off], len, "%s=%llu ", dev_name(&hd->dev),
				timesync_svc->timesync_hd->ping_frame_time);
		len = buflen - off;
	}

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		if (len < buflen) {
			interface = timesync_interface->interface;
			off += snprintf(&buf[off], len, "%s=%llu ",
					dev_name(&interface->dev),
					timesync_interface->ping_frame_time);
			len = buflen - off;
		}
	}
	if (len < buflen)
		off += snprintf(&buf[off], len, "\n");
	return off;
}
/*
 * Format each participant's ping FrameTime converted to seconds.nanoseconds
 * via gb_timesync_to_timespec(); returns the character count (snprintf
 * semantics). The 'len >= buflen' checks rely on the unsigned wrap of
 * 'buflen - off' to stop formatting once the buffer is exhausted.
 *
 * NOTE(review): tv_sec/tv_nsec are signed longs printed with %lu - benign
 * for the non-negative values produced here, but worth confirming.
 */
static size_t gb_timesync_log_frame_ktime(struct gb_timesync_svc *timesync_svc,
					  char *buf, size_t buflen)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_interface *interface;
	struct timespec ts;
	unsigned int len;
	size_t off;

	/* AP */
	gb_timesync_to_timespec(timesync_svc, timesync_svc->ap_ping_frame_time,
				&ts);
	off = snprintf(buf, buflen, "%s frametime: ap=%lu.%lu ",
		       greybus_bus_type.name, ts.tv_sec, ts.tv_nsec);
	len = buflen - off;
	if (len >= buflen)
		goto done;

	/* SVC */
	gb_timesync_to_timespec(timesync_svc, timesync_svc->svc_ping_frame_time,
				&ts);
	off += snprintf(&buf[off], len, "%s=%lu.%lu ", dev_name(&svc->dev),
			ts.tv_sec, ts.tv_nsec);
	len = buflen - off;
	if (len >= buflen)
		goto done;

	/* APB/GPB */
	hd = timesync_svc->timesync_hd->hd;
	gb_timesync_to_timespec(timesync_svc,
				timesync_svc->timesync_hd->ping_frame_time,
				&ts);
	off += snprintf(&buf[off], len, "%s=%lu.%lu ",
			dev_name(&hd->dev),
			ts.tv_sec, ts.tv_nsec);
	len = buflen - off;
	if (len >= buflen)
		goto done;

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		interface = timesync_interface->interface;
		gb_timesync_to_timespec(timesync_svc,
					timesync_interface->ping_frame_time,
					&ts);
		off += snprintf(&buf[off], len, "%s=%lu.%lu ",
				dev_name(&interface->dev),
				ts.tv_sec, ts.tv_nsec);
		len = buflen - off;
		if (len >= buflen)
			goto done;
	}
	off += snprintf(&buf[off], len, "\n");
done:
	return off;
}
/*
 * Send an SVC initiated wake 'ping' to each TimeSync participant.
 * Get the FrameTime from each participant associated with the wake
 * ping.
 *
 * Runs from the worker in state PING with the mutex held; on success
 * moves back to ACTIVE, on SVC-side failure tears TimeSync down.
 */
static void gb_timesync_ping(struct gb_timesync_svc *timesync_svc)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_control *control;
	u64 *ping_frame_time;
	int ret;

	/* Get access to the wake pins in the AP and SVC */
	ret = gb_timesync_platform_lock_bus(timesync_svc);
	if (ret < 0) {
		gb_timesync_platform_lock_bus_fail(timesync_svc, ret);
		return;
	}
	ret = gb_svc_timesync_wake_pins_acquire(svc, timesync_svc->strobe_mask);
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_wake_pins_acquire %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}

	/*
	 * Have SVC generate a timesync ping; capture_ping tells the ISR
	 * (not visible in this file - confirm against the ISR path) to
	 * latch the AP FrameTime for the ping event.
	 */
	timesync_svc->capture_ping = true;
	timesync_svc->svc_ping_frame_time = 0;
	ret = gb_svc_timesync_ping(svc, &timesync_svc->svc_ping_frame_time);
	timesync_svc->capture_ping = false;
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_ping %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}

	/* Get the ping FrameTime from each APB/GPB */
	hd = timesync_svc->timesync_hd->hd;
	timesync_svc->timesync_hd->ping_frame_time = 0;
	ret = hd->driver->timesync_get_last_event(hd,
		&timesync_svc->timesync_hd->ping_frame_time);
	if (ret)
		dev_err(&hd->dev, "host timesync_get_last_event %d\n", ret);

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		control = timesync_interface->interface->control;
		timesync_interface->ping_frame_time = 0;
		ping_frame_time = &timesync_interface->ping_frame_time;
		ret = gb_control_timesync_get_last_event(control,
							 ping_frame_time);
		if (ret) {
			dev_err(&timesync_interface->interface->dev,
				"gb_control_timesync_get_last_event %d\n", ret);
		}
	}

	/* Ping success - move to timesync active */
	gb_svc_timesync_wake_pins_release(svc);
	gb_timesync_platform_unlock_bus();
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_ACTIVE);
}
  658. static void gb_timesync_log_ping_time(struct gb_timesync_svc *timesync_svc)
  659. {
  660. char *buf;
  661. if (!timesync_svc->print_ping)
  662. return;
  663. buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
  664. if (buf) {
  665. gb_timesync_log_frame_time(timesync_svc, buf, PAGE_SIZE);
  666. dev_dbg(&timesync_svc->svc->dev, "%s", buf);
  667. kfree(buf);
  668. }
  669. }
/*
 * Perform the actual work of scheduled TimeSync logic.
 *
 * Dispatch on the current state under the mutex: INIT starts a sync
 * cycle, WAIT_SVC means the SVC strobe timeout fired (tear down),
 * AUTHORITATIVE collates/propagates the SVC times, PING verifies and
 * optionally logs the result.
 */
static void gb_timesync_worker(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct gb_timesync_svc *timesync_svc =
		container_of(delayed_work, struct gb_timesync_svc, delayed_work);

	mutex_lock(&timesync_svc->mutex);
	switch (timesync_svc->state) {
	case GB_TIMESYNC_STATE_INIT:
		gb_timesync_enable(timesync_svc);
		break;

	case GB_TIMESYNC_STATE_WAIT_SVC:
		dev_err(&timesync_svc->svc->dev,
			"timeout SVC strobe completion %d/%d\n",
			timesync_svc->strobe, GB_TIMESYNC_MAX_STROBES);
		gb_timesync_teardown(timesync_svc);
		break;

	case GB_TIMESYNC_STATE_AUTHORITATIVE:
		gb_timesync_authoritative(timesync_svc);
		break;

	case GB_TIMESYNC_STATE_PING:
		gb_timesync_ping(timesync_svc);
		gb_timesync_log_ping_time(timesync_svc);
		break;

	default:
		pr_err("Invalid state %d for delayed work\n",
		       timesync_svc->state);
		break;
	}
	mutex_unlock(&timesync_svc->mutex);
}
  703. /*
  704. * Schedule a new TimeSync INIT or PING operation serialized w/r to
  705. * gb_timesync_worker().
  706. */
  707. static int gb_timesync_schedule(struct gb_timesync_svc *timesync_svc, int state)
  708. {
  709. int ret = 0;
  710. if (state != GB_TIMESYNC_STATE_INIT && state != GB_TIMESYNC_STATE_PING)
  711. return -EINVAL;
  712. mutex_lock(&timesync_svc->mutex);
  713. if (timesync_svc->state != GB_TIMESYNC_STATE_INVALID) {
  714. gb_timesync_set_state_atomic(timesync_svc, state);
  715. } else {
  716. ret = -ENODEV;
  717. }
  718. mutex_unlock(&timesync_svc->mutex);
  719. return ret;
  720. }
/*
 * Schedule an INIT or PING operation and block (interruptibly) until the
 * state machine settles in ACTIVE, INACTIVE or INVALID, then report the
 * resulting status. Returns 0 on success, -ERESTARTSYS if interrupted,
 * or the status code from __gb_timesync_get_status().
 */
static int __gb_timesync_schedule_synchronous(
	struct gb_timesync_svc *timesync_svc, int state)
{
	unsigned long flags;
	int ret;

	ret = gb_timesync_schedule(timesync_svc, state);
	if (ret)
		return ret;

	ret = wait_event_interruptible(timesync_svc->wait_queue,
			(timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE ||
			 timesync_svc->state == GB_TIMESYNC_STATE_INACTIVE ||
			 timesync_svc->state == GB_TIMESYNC_STATE_INVALID));
	if (ret)
		return ret;

	/* Read the final status with both locks held for a stable view */
	mutex_lock(&timesync_svc->mutex);
	spin_lock_irqsave(&timesync_svc->spinlock, flags);

	ret = __gb_timesync_get_status(timesync_svc);

	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
	mutex_unlock(&timesync_svc->mutex);

	return ret;
}
  742. static struct gb_timesync_svc *gb_timesync_find_timesync_svc(
  743. struct gb_host_device *hd)
  744. {
  745. struct gb_timesync_svc *timesync_svc;
  746. list_for_each_entry(timesync_svc, &gb_timesync_svc_list, list) {
  747. if (timesync_svc->svc == hd->svc)
  748. return timesync_svc;
  749. }
  750. return NULL;
  751. }
  752. static struct gb_timesync_interface *gb_timesync_find_timesync_interface(
  753. struct gb_timesync_svc *timesync_svc,
  754. struct gb_interface *interface)
  755. {
  756. struct gb_timesync_interface *timesync_interface;
  757. list_for_each_entry(timesync_interface, &timesync_svc->interface_list, list) {
  758. if (timesync_interface->interface == interface)
  759. return timesync_interface;
  760. }
  761. return NULL;
  762. }
  763. int gb_timesync_schedule_synchronous(struct gb_interface *interface)
  764. {
  765. int ret;
  766. struct gb_timesync_svc *timesync_svc;
  767. int retries;
  768. if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
  769. return 0;
  770. mutex_lock(&gb_timesync_svc_list_mutex);
  771. for (retries = 0; retries < GB_TIMESYNC_MAX_RETRIES; retries++) {
  772. timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
  773. if (!timesync_svc) {
  774. ret = -ENODEV;
  775. goto done;
  776. }
  777. ret = __gb_timesync_schedule_synchronous(timesync_svc,
  778. GB_TIMESYNC_STATE_INIT);
  779. if (!ret)
  780. break;
  781. }
  782. if (ret && retries == GB_TIMESYNC_MAX_RETRIES)
  783. ret = -ETIMEDOUT;
  784. done:
  785. mutex_unlock(&gb_timesync_svc_list_mutex);
  786. return ret;
  787. }
  788. EXPORT_SYMBOL_GPL(gb_timesync_schedule_synchronous);
  789. void gb_timesync_schedule_asynchronous(struct gb_interface *interface)
  790. {
  791. struct gb_timesync_svc *timesync_svc;
  792. if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
  793. return;
  794. mutex_lock(&gb_timesync_svc_list_mutex);
  795. timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
  796. if (!timesync_svc)
  797. goto done;
  798. gb_timesync_schedule(timesync_svc, GB_TIMESYNC_STATE_INIT);
  799. done:
  800. mutex_unlock(&gb_timesync_svc_list_mutex);
  801. return;
  802. }
  803. EXPORT_SYMBOL_GPL(gb_timesync_schedule_asynchronous);
  804. static ssize_t gb_timesync_ping_read(struct file *file, char __user *ubuf,
  805. size_t len, loff_t *offset, bool ktime)
  806. {
  807. struct gb_timesync_svc *timesync_svc = file->f_inode->i_private;
  808. char *buf;
  809. ssize_t ret = 0;
  810. mutex_lock(&gb_timesync_svc_list_mutex);
  811. mutex_lock(&timesync_svc->mutex);
  812. if (list_empty(&timesync_svc->interface_list))
  813. ret = -ENODEV;
  814. timesync_svc->print_ping = false;
  815. mutex_unlock(&timesync_svc->mutex);
  816. if (ret)
  817. goto done;
  818. ret = __gb_timesync_schedule_synchronous(timesync_svc,
  819. GB_TIMESYNC_STATE_PING);
  820. if (ret)
  821. goto done;
  822. buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
  823. if (!buf) {
  824. ret = -ENOMEM;
  825. goto done;
  826. }
  827. if (ktime)
  828. ret = gb_timesync_log_frame_ktime(timesync_svc, buf, PAGE_SIZE);
  829. else
  830. ret = gb_timesync_log_frame_time(timesync_svc, buf, PAGE_SIZE);
  831. if (ret > 0)
  832. ret = simple_read_from_buffer(ubuf, len, offset, buf, ret);
  833. kfree(buf);
  834. done:
  835. mutex_unlock(&gb_timesync_svc_list_mutex);
  836. return ret;
  837. }
/* debugfs "frame-time" read: ping and report the raw FrameTime. */
static ssize_t gb_timesync_ping_read_frame_time(struct file *file,
						char __user *buf,
						size_t len, loff_t *offset)
{
	return gb_timesync_ping_read(file, buf, len, offset, false);
}
/* debugfs "frame-ktime" read: ping and report the ktime conversion. */
static ssize_t gb_timesync_ping_read_frame_ktime(struct file *file,
						 char __user *buf,
						 size_t len, loff_t *offset)
{
	return gb_timesync_ping_read(file, buf, len, offset, true);
}
/* File operations for the read-only debugfs "frame-time" entry. */
static const struct file_operations gb_timesync_debugfs_frame_time_ops = {
	.read		= gb_timesync_ping_read_frame_time,
};

/* File operations for the read-only debugfs "frame-ktime" entry. */
static const struct file_operations gb_timesync_debugfs_frame_ktime_ops = {
	.read		= gb_timesync_ping_read_frame_ktime,
};
  856. static int gb_timesync_hd_add(struct gb_timesync_svc *timesync_svc,
  857. struct gb_host_device *hd)
  858. {
  859. struct gb_timesync_host_device *timesync_hd;
  860. timesync_hd = kzalloc(sizeof(*timesync_hd), GFP_KERNEL);
  861. if (!timesync_hd)
  862. return -ENOMEM;
  863. WARN_ON(timesync_svc->timesync_hd);
  864. timesync_hd->hd = hd;
  865. timesync_svc->timesync_hd = timesync_hd;
  866. return 0;
  867. }
  868. static void gb_timesync_hd_remove(struct gb_timesync_svc *timesync_svc,
  869. struct gb_host_device *hd)
  870. {
  871. if (timesync_svc->timesync_hd->hd == hd) {
  872. kfree(timesync_svc->timesync_hd);
  873. timesync_svc->timesync_hd = NULL;
  874. return;
  875. }
  876. WARN_ON(1);
  877. }
  878. int gb_timesync_svc_add(struct gb_svc *svc)
  879. {
  880. struct gb_timesync_svc *timesync_svc;
  881. int ret;
  882. timesync_svc = kzalloc(sizeof(*timesync_svc), GFP_KERNEL);
  883. if (!timesync_svc)
  884. return -ENOMEM;
  885. timesync_svc->work_queue =
  886. create_singlethread_workqueue("gb-timesync-work_queue");
  887. if (!timesync_svc->work_queue) {
  888. kfree(timesync_svc);
  889. return -ENOMEM;
  890. }
  891. mutex_lock(&gb_timesync_svc_list_mutex);
  892. INIT_LIST_HEAD(&timesync_svc->interface_list);
  893. INIT_DELAYED_WORK(&timesync_svc->delayed_work, gb_timesync_worker);
  894. mutex_init(&timesync_svc->mutex);
  895. spin_lock_init(&timesync_svc->spinlock);
  896. init_waitqueue_head(&timesync_svc->wait_queue);
  897. timesync_svc->svc = svc;
  898. timesync_svc->frame_time_offset = 0;
  899. timesync_svc->capture_ping = false;
  900. gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);
  901. timesync_svc->frame_time_dentry =
  902. debugfs_create_file("frame-time", S_IRUGO, svc->debugfs_dentry,
  903. timesync_svc,
  904. &gb_timesync_debugfs_frame_time_ops);
  905. timesync_svc->frame_ktime_dentry =
  906. debugfs_create_file("frame-ktime", S_IRUGO, svc->debugfs_dentry,
  907. timesync_svc,
  908. &gb_timesync_debugfs_frame_ktime_ops);
  909. list_add(&timesync_svc->list, &gb_timesync_svc_list);
  910. ret = gb_timesync_hd_add(timesync_svc, svc->hd);
  911. if (ret) {
  912. list_del(&timesync_svc->list);
  913. debugfs_remove(timesync_svc->frame_ktime_dentry);
  914. debugfs_remove(timesync_svc->frame_time_dentry);
  915. destroy_workqueue(timesync_svc->work_queue);
  916. kfree(timesync_svc);
  917. goto done;
  918. }
  919. init_timer(&timesync_svc->ktime_timer);
  920. timesync_svc->ktime_timer.function = gb_timesync_ktime_timer_fn;
  921. timesync_svc->ktime_timer.expires = jiffies + GB_TIMESYNC_KTIME_UPDATE;
  922. timesync_svc->ktime_timer.data = (unsigned long)timesync_svc;
  923. add_timer(&timesync_svc->ktime_timer);
  924. done:
  925. mutex_unlock(&gb_timesync_svc_list_mutex);
  926. return ret;
  927. }
  928. EXPORT_SYMBOL_GPL(gb_timesync_svc_add);
/*
 * Tear down and free the TimeSync state bound to @svc's host device.
 *
 * No-op if no timesync_svc is registered for svc->hd.  Otherwise the
 * delayed worker is cancelled first, the state is forced to
 * GB_TIMESYNC_STATE_INVALID (so schedulers return -ENODEV), the ktime
 * refresh timer is stopped, and all per-interface records, debugfs files
 * and the workqueue are released before the structure itself is freed.
 */
void gb_timesync_svc_remove(struct gb_svc *svc)
{
	struct gb_timesync_svc *timesync_svc;
	struct gb_timesync_interface *timesync_interface;
	struct gb_timesync_interface *next;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
	if (!timesync_svc)
		goto done;

	/* Make sure the worker is idle before dismantling its state. */
	cancel_delayed_work_sync(&timesync_svc->delayed_work);

	mutex_lock(&timesync_svc->mutex);

	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INVALID);
	del_timer_sync(&timesync_svc->ktime_timer);
	gb_timesync_teardown(timesync_svc);

	gb_timesync_hd_remove(timesync_svc, svc->hd);
	/* Release every interface record still attached to this SVC. */
	list_for_each_entry_safe(timesync_interface, next,
				 &timesync_svc->interface_list, list) {
		list_del(&timesync_interface->list);
		kfree(timesync_interface);
	}
	debugfs_remove(timesync_svc->frame_ktime_dentry);
	debugfs_remove(timesync_svc->frame_time_dentry);
	destroy_workqueue(timesync_svc->work_queue);
	list_del(&timesync_svc->list);
	mutex_unlock(&timesync_svc->mutex);
	kfree(timesync_svc);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
}
EXPORT_SYMBOL_GPL(gb_timesync_svc_remove);
  959. /*
  960. * Add a Greybus Interface to the set of TimeSync Interfaces.
  961. */
  962. int gb_timesync_interface_add(struct gb_interface *interface)
  963. {
  964. struct gb_timesync_svc *timesync_svc;
  965. struct gb_timesync_interface *timesync_interface;
  966. int ret = 0;
  967. if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
  968. return 0;
  969. mutex_lock(&gb_timesync_svc_list_mutex);
  970. timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
  971. if (!timesync_svc) {
  972. ret = -ENODEV;
  973. goto done;
  974. }
  975. timesync_interface = kzalloc(sizeof(*timesync_interface), GFP_KERNEL);
  976. if (!timesync_interface) {
  977. ret = -ENOMEM;
  978. goto done;
  979. }
  980. mutex_lock(&timesync_svc->mutex);
  981. timesync_interface->interface = interface;
  982. list_add(&timesync_interface->list, &timesync_svc->interface_list);
  983. timesync_svc->strobe_mask |= 1 << interface->interface_id;
  984. mutex_unlock(&timesync_svc->mutex);
  985. done:
  986. mutex_unlock(&gb_timesync_svc_list_mutex);
  987. return ret;
  988. }
  989. EXPORT_SYMBOL_GPL(gb_timesync_interface_add);
  990. /*
  991. * Remove a Greybus Interface from the set of TimeSync Interfaces.
  992. */
  993. void gb_timesync_interface_remove(struct gb_interface *interface)
  994. {
  995. struct gb_timesync_svc *timesync_svc;
  996. struct gb_timesync_interface *timesync_interface;
  997. if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
  998. return;
  999. mutex_lock(&gb_timesync_svc_list_mutex);
  1000. timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
  1001. if (!timesync_svc)
  1002. goto done;
  1003. timesync_interface = gb_timesync_find_timesync_interface(timesync_svc,
  1004. interface);
  1005. if (!timesync_interface)
  1006. goto done;
  1007. mutex_lock(&timesync_svc->mutex);
  1008. timesync_svc->strobe_mask &= ~(1 << interface->interface_id);
  1009. list_del(&timesync_interface->list);
  1010. kfree(timesync_interface);
  1011. mutex_unlock(&timesync_svc->mutex);
  1012. done:
  1013. mutex_unlock(&gb_timesync_svc_list_mutex);
  1014. }
  1015. EXPORT_SYMBOL_GPL(gb_timesync_interface_remove);
  1016. /*
  1017. * Give the authoritative FrameTime to the calling function. Returns zero if we
  1018. * are not in GB_TIMESYNC_STATE_ACTIVE.
  1019. */
  1020. static u64 gb_timesync_get_frame_time(struct gb_timesync_svc *timesync_svc)
  1021. {
  1022. unsigned long flags;
  1023. u64 ret;
  1024. spin_lock_irqsave(&timesync_svc->spinlock, flags);
  1025. if (timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE)
  1026. ret = __gb_timesync_get_frame_time(timesync_svc);
  1027. else
  1028. ret = 0;
  1029. spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
  1030. return ret;
  1031. }
  1032. u64 gb_timesync_get_frame_time_by_interface(struct gb_interface *interface)
  1033. {
  1034. struct gb_timesync_svc *timesync_svc;
  1035. u64 ret = 0;
  1036. mutex_lock(&gb_timesync_svc_list_mutex);
  1037. timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
  1038. if (!timesync_svc)
  1039. goto done;
  1040. ret = gb_timesync_get_frame_time(timesync_svc);
  1041. done:
  1042. mutex_unlock(&gb_timesync_svc_list_mutex);
  1043. return ret;
  1044. }
  1045. EXPORT_SYMBOL_GPL(gb_timesync_get_frame_time_by_interface);
  1046. u64 gb_timesync_get_frame_time_by_svc(struct gb_svc *svc)
  1047. {
  1048. struct gb_timesync_svc *timesync_svc;
  1049. u64 ret = 0;
  1050. mutex_lock(&gb_timesync_svc_list_mutex);
  1051. timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
  1052. if (!timesync_svc)
  1053. goto done;
  1054. ret = gb_timesync_get_frame_time(timesync_svc);
  1055. done:
  1056. mutex_unlock(&gb_timesync_svc_list_mutex);
  1057. return ret;
  1058. }
  1059. EXPORT_SYMBOL_GPL(gb_timesync_get_frame_time_by_svc);
  1060. /* Incrementally updates the conversion base from FrameTime to ktime */
  1061. static void gb_timesync_ktime_timer_fn(unsigned long data)
  1062. {
  1063. struct gb_timesync_svc *timesync_svc =
  1064. (struct gb_timesync_svc *)data;
  1065. unsigned long flags;
  1066. u64 frame_time;
  1067. struct timespec ts;
  1068. spin_lock_irqsave(&timesync_svc->spinlock, flags);
  1069. if (timesync_svc->state != GB_TIMESYNC_STATE_ACTIVE)
  1070. goto done;
  1071. ktime_get_ts(&ts);
  1072. frame_time = __gb_timesync_get_frame_time(timesync_svc);
  1073. gb_timesync_store_ktime(timesync_svc, ts, frame_time);
  1074. done:
  1075. spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
  1076. mod_timer(&timesync_svc->ktime_timer,
  1077. jiffies + GB_TIMESYNC_KTIME_UPDATE);
  1078. }
  1079. int gb_timesync_to_timespec_by_svc(struct gb_svc *svc, u64 frame_time,
  1080. struct timespec *ts)
  1081. {
  1082. struct gb_timesync_svc *timesync_svc;
  1083. int ret = 0;
  1084. mutex_lock(&gb_timesync_svc_list_mutex);
  1085. timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
  1086. if (!timesync_svc) {
  1087. ret = -ENODEV;
  1088. goto done;
  1089. }
  1090. ret = gb_timesync_to_timespec(timesync_svc, frame_time, ts);
  1091. done:
  1092. mutex_unlock(&gb_timesync_svc_list_mutex);
  1093. return ret;
  1094. }
  1095. EXPORT_SYMBOL_GPL(gb_timesync_to_timespec_by_svc);
  1096. int gb_timesync_to_timespec_by_interface(struct gb_interface *interface,
  1097. u64 frame_time, struct timespec *ts)
  1098. {
  1099. struct gb_timesync_svc *timesync_svc;
  1100. int ret = 0;
  1101. mutex_lock(&gb_timesync_svc_list_mutex);
  1102. timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
  1103. if (!timesync_svc) {
  1104. ret = -ENODEV;
  1105. goto done;
  1106. }
  1107. ret = gb_timesync_to_timespec(timesync_svc, frame_time, ts);
  1108. done:
  1109. mutex_unlock(&gb_timesync_svc_list_mutex);
  1110. return ret;
  1111. }
  1112. EXPORT_SYMBOL_GPL(gb_timesync_to_timespec_by_interface);
/*
 * Strobe interrupt handler.
 *
 * Captures the local FrameTime (and a raw ktime sample) at the moment the
 * strobe is observed.  In GB_TIMESYNC_STATE_PING the FrameTime is recorded
 * as the AP ping time (only while capture_ping is set); in
 * GB_TIMESYNC_STATE_WAIT_SVC it is appended to strobe_data[] and, once
 * GB_TIMESYNC_MAX_STROBES samples exist, the state machine advances to
 * GB_TIMESYNC_STATE_AUTHORITATIVE.  Strobes in any other state are ignored.
 */
void gb_timesync_irq(struct gb_timesync_svc *timesync_svc)
{
	unsigned long flags;
	u64 strobe_time;
	bool strobe_is_ping = true;
	struct timespec ts;

	/* Sample both clocks as early as possible, before taking the lock. */
	ktime_get_ts(&ts);
	strobe_time = __gb_timesync_get_frame_time(timesync_svc);

	spin_lock_irqsave(&timesync_svc->spinlock, flags);

	if (timesync_svc->state == GB_TIMESYNC_STATE_PING) {
		/* Ignore strobes unless a ping capture was requested. */
		if (!timesync_svc->capture_ping)
			goto done_nolog;
		timesync_svc->ap_ping_frame_time = strobe_time;
		goto done_log;
	} else if (timesync_svc->state != GB_TIMESYNC_STATE_WAIT_SVC) {
		goto done_nolog;
	}

	/* Record this strobe sample for the authoritative calculation. */
	timesync_svc->strobe_data[timesync_svc->strobe].frame_time = strobe_time;
	timesync_svc->strobe_data[timesync_svc->strobe].ts = ts;

	if (++timesync_svc->strobe == GB_TIMESYNC_MAX_STROBES) {
		gb_timesync_set_state(timesync_svc,
				      GB_TIMESYNC_STATE_AUTHORITATIVE);
	}
	strobe_is_ping = false;
done_log:
	trace_gb_timesync_irq(strobe_is_ping, timesync_svc->strobe,
			      GB_TIMESYNC_MAX_STROBES, strobe_time);
done_nolog:
	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
}
EXPORT_SYMBOL(gb_timesync_irq);
/*
 * One-time module initialization: bring up the platform timesync layer and
 * precompute the clock-rate-derived conversion constants used elsewhere.
 */
int __init gb_timesync_init(void)
{
	int ret = 0;

	ret = gb_timesync_platform_init();
	if (ret) {
		pr_err("timesync platform init fail!\n");
		return ret;
	}

	gb_timesync_clock_rate = gb_timesync_platform_get_clock_rate();

	/* Calculate nanoseconds and femtoseconds per clock */
	/* do_div() divides its first argument in place. */
	gb_timesync_fs_per_clock = FSEC_PER_SEC;
	do_div(gb_timesync_fs_per_clock, gb_timesync_clock_rate);
	gb_timesync_ns_per_clock = NSEC_PER_SEC;
	do_div(gb_timesync_ns_per_clock, gb_timesync_clock_rate);

	/* Calculate the maximum number of clocks we will convert to ktime */
	gb_timesync_max_ktime_diff =
		GB_TIMESYNC_MAX_KTIME_CONVERSION * gb_timesync_clock_rate;

	pr_info("Time-Sync @ %lu Hz max ktime conversion +/- %d seconds\n",
		gb_timesync_clock_rate, GB_TIMESYNC_MAX_KTIME_CONVERSION);
	return 0;
}
/* Module teardown counterpart of gb_timesync_init(). */
void gb_timesync_exit(void)
{
	gb_timesync_platform_exit();
}