ldc.c 49 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
05520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378
  1. /* ldc.c: Logical Domain Channel link-layer protocol driver.
  2. *
  3. * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
  4. */
  5. #include <linux/kernel.h>
  6. #include <linux/module.h>
  7. #include <linux/slab.h>
  8. #include <linux/spinlock.h>
  9. #include <linux/delay.h>
  10. #include <linux/errno.h>
  11. #include <linux/string.h>
  12. #include <linux/scatterlist.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/list.h>
  15. #include <linux/init.h>
  16. #include <linux/bitmap.h>
  17. #include <asm/hypervisor.h>
  18. #include <asm/iommu.h>
  19. #include <asm/page.h>
  20. #include <asm/ldc.h>
  21. #include <asm/mdesc.h>
  22. #define DRV_MODULE_NAME "ldc"
  23. #define PFX DRV_MODULE_NAME ": "
  24. #define DRV_MODULE_VERSION "1.1"
  25. #define DRV_MODULE_RELDATE "July 22, 2008"
  26. static char version[] __devinitdata =
  27. DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
  28. #define LDC_PACKET_SIZE 64
  29. /* Packet header layout for unreliable and reliable mode frames.
  30. * When in RAW mode, packets are simply straight 64-byte payloads
  31. * with no headers.
  32. */
  33. struct ldc_packet {
  34. u8 type;
  35. #define LDC_CTRL 0x01
  36. #define LDC_DATA 0x02
  37. #define LDC_ERR 0x10
  38. u8 stype;
  39. #define LDC_INFO 0x01
  40. #define LDC_ACK 0x02
  41. #define LDC_NACK 0x04
  42. u8 ctrl;
  43. #define LDC_VERS 0x01 /* Link Version */
  44. #define LDC_RTS 0x02 /* Request To Send */
  45. #define LDC_RTR 0x03 /* Ready To Receive */
  46. #define LDC_RDX 0x04 /* Ready for Data eXchange */
  47. #define LDC_CTRL_MSK 0x0f
  48. u8 env;
  49. #define LDC_LEN 0x3f
  50. #define LDC_FRAG_MASK 0xc0
  51. #define LDC_START 0x40
  52. #define LDC_STOP 0x80
  53. u32 seqid;
  54. union {
  55. u8 u_data[LDC_PACKET_SIZE - 8];
  56. struct {
  57. u32 pad;
  58. u32 ackid;
  59. u8 r_data[LDC_PACKET_SIZE - 8 - 8];
  60. } r;
  61. } u;
  62. };
  63. struct ldc_version {
  64. u16 major;
  65. u16 minor;
  66. };
  67. /* Ordered from largest major to lowest. */
  68. static struct ldc_version ver_arr[] = {
  69. { .major = 1, .minor = 0 },
  70. };
  71. #define LDC_DEFAULT_MTU (4 * LDC_PACKET_SIZE)
  72. #define LDC_DEFAULT_NUM_ENTRIES (PAGE_SIZE / LDC_PACKET_SIZE)
  73. struct ldc_channel;
  74. struct ldc_mode_ops {
  75. int (*write)(struct ldc_channel *, const void *, unsigned int);
  76. int (*read)(struct ldc_channel *, void *, unsigned int);
  77. };
  78. static const struct ldc_mode_ops raw_ops;
  79. static const struct ldc_mode_ops nonraw_ops;
  80. static const struct ldc_mode_ops stream_ops;
  81. int ldom_domaining_enabled;
  82. struct ldc_iommu {
  83. /* Protects arena alloc/free. */
  84. spinlock_t lock;
  85. struct iommu_arena arena;
  86. struct ldc_mtable_entry *page_table;
  87. };
  88. struct ldc_channel {
  89. /* Protects all operations that depend upon channel state. */
  90. spinlock_t lock;
  91. unsigned long id;
  92. u8 *mssbuf;
  93. u32 mssbuf_len;
  94. u32 mssbuf_off;
  95. struct ldc_packet *tx_base;
  96. unsigned long tx_head;
  97. unsigned long tx_tail;
  98. unsigned long tx_num_entries;
  99. unsigned long tx_ra;
  100. unsigned long tx_acked;
  101. struct ldc_packet *rx_base;
  102. unsigned long rx_head;
  103. unsigned long rx_tail;
  104. unsigned long rx_num_entries;
  105. unsigned long rx_ra;
  106. u32 rcv_nxt;
  107. u32 snd_nxt;
  108. unsigned long chan_state;
  109. struct ldc_channel_config cfg;
  110. void *event_arg;
  111. const struct ldc_mode_ops *mops;
  112. struct ldc_iommu iommu;
  113. struct ldc_version ver;
  114. u8 hs_state;
  115. #define LDC_HS_CLOSED 0x00
  116. #define LDC_HS_OPEN 0x01
  117. #define LDC_HS_GOTVERS 0x02
  118. #define LDC_HS_SENTRTR 0x03
  119. #define LDC_HS_GOTRTR 0x04
  120. #define LDC_HS_COMPLETE 0x10
  121. u8 flags;
  122. #define LDC_FLAG_ALLOCED_QUEUES 0x01
  123. #define LDC_FLAG_REGISTERED_QUEUES 0x02
  124. #define LDC_FLAG_REGISTERED_IRQS 0x04
  125. #define LDC_FLAG_RESET 0x10
  126. u8 mss;
  127. u8 state;
  128. #define LDC_IRQ_NAME_MAX 32
  129. char rx_irq_name[LDC_IRQ_NAME_MAX];
  130. char tx_irq_name[LDC_IRQ_NAME_MAX];
  131. struct hlist_head mh_list;
  132. struct hlist_node list;
  133. };
  134. #define ldcdbg(TYPE, f, a...) \
  135. do { if (lp->cfg.debug & LDC_DEBUG_##TYPE) \
  136. printk(KERN_INFO PFX "ID[%lu] " f, lp->id, ## a); \
  137. } while (0)
  138. static const char *state_to_str(u8 state)
  139. {
  140. switch (state) {
  141. case LDC_STATE_INVALID:
  142. return "INVALID";
  143. case LDC_STATE_INIT:
  144. return "INIT";
  145. case LDC_STATE_BOUND:
  146. return "BOUND";
  147. case LDC_STATE_READY:
  148. return "READY";
  149. case LDC_STATE_CONNECTED:
  150. return "CONNECTED";
  151. default:
  152. return "<UNKNOWN>";
  153. }
  154. }
  155. static void ldc_set_state(struct ldc_channel *lp, u8 state)
  156. {
  157. ldcdbg(STATE, "STATE (%s) --> (%s)\n",
  158. state_to_str(lp->state),
  159. state_to_str(state));
  160. lp->state = state;
  161. }
  162. static unsigned long __advance(unsigned long off, unsigned long num_entries)
  163. {
  164. off += LDC_PACKET_SIZE;
  165. if (off == (num_entries * LDC_PACKET_SIZE))
  166. off = 0;
  167. return off;
  168. }
  169. static unsigned long rx_advance(struct ldc_channel *lp, unsigned long off)
  170. {
  171. return __advance(off, lp->rx_num_entries);
  172. }
  173. static unsigned long tx_advance(struct ldc_channel *lp, unsigned long off)
  174. {
  175. return __advance(off, lp->tx_num_entries);
  176. }
  177. static struct ldc_packet *handshake_get_tx_packet(struct ldc_channel *lp,
  178. unsigned long *new_tail)
  179. {
  180. struct ldc_packet *p;
  181. unsigned long t;
  182. t = tx_advance(lp, lp->tx_tail);
  183. if (t == lp->tx_head)
  184. return NULL;
  185. *new_tail = t;
  186. p = lp->tx_base;
  187. return p + (lp->tx_tail / LDC_PACKET_SIZE);
  188. }
  189. /* When we are in reliable or stream mode, have to track the next packet
  190. * we haven't gotten an ACK for in the TX queue using tx_acked. We have
  191. * to be careful not to stomp over the queue past that point. During
  192. * the handshake, we don't have TX data packets pending in the queue
  193. * and that's why handshake_get_tx_packet() need not be mindful of
  194. * lp->tx_acked.
  195. */
  196. static unsigned long head_for_data(struct ldc_channel *lp)
  197. {
  198. if (lp->cfg.mode == LDC_MODE_STREAM)
  199. return lp->tx_acked;
  200. return lp->tx_head;
  201. }
  202. static int tx_has_space_for(struct ldc_channel *lp, unsigned int size)
  203. {
  204. unsigned long limit, tail, new_tail, diff;
  205. unsigned int mss;
  206. limit = head_for_data(lp);
  207. tail = lp->tx_tail;
  208. new_tail = tx_advance(lp, tail);
  209. if (new_tail == limit)
  210. return 0;
  211. if (limit > new_tail)
  212. diff = limit - new_tail;
  213. else
  214. diff = (limit +
  215. ((lp->tx_num_entries * LDC_PACKET_SIZE) - new_tail));
  216. diff /= LDC_PACKET_SIZE;
  217. mss = lp->mss;
  218. if (diff * mss < size)
  219. return 0;
  220. return 1;
  221. }
  222. static struct ldc_packet *data_get_tx_packet(struct ldc_channel *lp,
  223. unsigned long *new_tail)
  224. {
  225. struct ldc_packet *p;
  226. unsigned long h, t;
  227. h = head_for_data(lp);
  228. t = tx_advance(lp, lp->tx_tail);
  229. if (t == h)
  230. return NULL;
  231. *new_tail = t;
  232. p = lp->tx_base;
  233. return p + (lp->tx_tail / LDC_PACKET_SIZE);
  234. }
  235. static int set_tx_tail(struct ldc_channel *lp, unsigned long tail)
  236. {
  237. unsigned long orig_tail = lp->tx_tail;
  238. int limit = 1000;
  239. lp->tx_tail = tail;
  240. while (limit-- > 0) {
  241. unsigned long err;
  242. err = sun4v_ldc_tx_set_qtail(lp->id, tail);
  243. if (!err)
  244. return 0;
  245. if (err != HV_EWOULDBLOCK) {
  246. lp->tx_tail = orig_tail;
  247. return -EINVAL;
  248. }
  249. udelay(1);
  250. }
  251. lp->tx_tail = orig_tail;
  252. return -EBUSY;
  253. }
  254. /* This just updates the head value in the hypervisor using
  255. * a polling loop with a timeout. The caller takes care of
  256. * upating software state representing the head change, if any.
  257. */
  258. static int __set_rx_head(struct ldc_channel *lp, unsigned long head)
  259. {
  260. int limit = 1000;
  261. while (limit-- > 0) {
  262. unsigned long err;
  263. err = sun4v_ldc_rx_set_qhead(lp->id, head);
  264. if (!err)
  265. return 0;
  266. if (err != HV_EWOULDBLOCK)
  267. return -EINVAL;
  268. udelay(1);
  269. }
  270. return -EBUSY;
  271. }
  272. static int send_tx_packet(struct ldc_channel *lp,
  273. struct ldc_packet *p,
  274. unsigned long new_tail)
  275. {
  276. BUG_ON(p != (lp->tx_base + (lp->tx_tail / LDC_PACKET_SIZE)));
  277. return set_tx_tail(lp, new_tail);
  278. }
  279. static struct ldc_packet *handshake_compose_ctrl(struct ldc_channel *lp,
  280. u8 stype, u8 ctrl,
  281. void *data, int dlen,
  282. unsigned long *new_tail)
  283. {
  284. struct ldc_packet *p = handshake_get_tx_packet(lp, new_tail);
  285. if (p) {
  286. memset(p, 0, sizeof(*p));
  287. p->type = LDC_CTRL;
  288. p->stype = stype;
  289. p->ctrl = ctrl;
  290. if (data)
  291. memcpy(p->u.u_data, data, dlen);
  292. }
  293. return p;
  294. }
  295. static int start_handshake(struct ldc_channel *lp)
  296. {
  297. struct ldc_packet *p;
  298. struct ldc_version *ver;
  299. unsigned long new_tail;
  300. ver = &ver_arr[0];
  301. ldcdbg(HS, "SEND VER INFO maj[%u] min[%u]\n",
  302. ver->major, ver->minor);
  303. p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
  304. ver, sizeof(*ver), &new_tail);
  305. if (p) {
  306. int err = send_tx_packet(lp, p, new_tail);
  307. if (!err)
  308. lp->flags &= ~LDC_FLAG_RESET;
  309. return err;
  310. }
  311. return -EBUSY;
  312. }
  313. static int send_version_nack(struct ldc_channel *lp,
  314. u16 major, u16 minor)
  315. {
  316. struct ldc_packet *p;
  317. struct ldc_version ver;
  318. unsigned long new_tail;
  319. ver.major = major;
  320. ver.minor = minor;
  321. p = handshake_compose_ctrl(lp, LDC_NACK, LDC_VERS,
  322. &ver, sizeof(ver), &new_tail);
  323. if (p) {
  324. ldcdbg(HS, "SEND VER NACK maj[%u] min[%u]\n",
  325. ver.major, ver.minor);
  326. return send_tx_packet(lp, p, new_tail);
  327. }
  328. return -EBUSY;
  329. }
  330. static int send_version_ack(struct ldc_channel *lp,
  331. struct ldc_version *vp)
  332. {
  333. struct ldc_packet *p;
  334. unsigned long new_tail;
  335. p = handshake_compose_ctrl(lp, LDC_ACK, LDC_VERS,
  336. vp, sizeof(*vp), &new_tail);
  337. if (p) {
  338. ldcdbg(HS, "SEND VER ACK maj[%u] min[%u]\n",
  339. vp->major, vp->minor);
  340. return send_tx_packet(lp, p, new_tail);
  341. }
  342. return -EBUSY;
  343. }
  344. static int send_rts(struct ldc_channel *lp)
  345. {
  346. struct ldc_packet *p;
  347. unsigned long new_tail;
  348. p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RTS, NULL, 0,
  349. &new_tail);
  350. if (p) {
  351. p->env = lp->cfg.mode;
  352. p->seqid = 0;
  353. lp->rcv_nxt = 0;
  354. ldcdbg(HS, "SEND RTS env[0x%x] seqid[0x%x]\n",
  355. p->env, p->seqid);
  356. return send_tx_packet(lp, p, new_tail);
  357. }
  358. return -EBUSY;
  359. }
  360. static int send_rtr(struct ldc_channel *lp)
  361. {
  362. struct ldc_packet *p;
  363. unsigned long new_tail;
  364. p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RTR, NULL, 0,
  365. &new_tail);
  366. if (p) {
  367. p->env = lp->cfg.mode;
  368. p->seqid = 0;
  369. ldcdbg(HS, "SEND RTR env[0x%x] seqid[0x%x]\n",
  370. p->env, p->seqid);
  371. return send_tx_packet(lp, p, new_tail);
  372. }
  373. return -EBUSY;
  374. }
  375. static int send_rdx(struct ldc_channel *lp)
  376. {
  377. struct ldc_packet *p;
  378. unsigned long new_tail;
  379. p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RDX, NULL, 0,
  380. &new_tail);
  381. if (p) {
  382. p->env = 0;
  383. p->seqid = ++lp->snd_nxt;
  384. p->u.r.ackid = lp->rcv_nxt;
  385. ldcdbg(HS, "SEND RDX env[0x%x] seqid[0x%x] ackid[0x%x]\n",
  386. p->env, p->seqid, p->u.r.ackid);
  387. return send_tx_packet(lp, p, new_tail);
  388. }
  389. return -EBUSY;
  390. }
  391. static int send_data_nack(struct ldc_channel *lp, struct ldc_packet *data_pkt)
  392. {
  393. struct ldc_packet *p;
  394. unsigned long new_tail;
  395. int err;
  396. p = data_get_tx_packet(lp, &new_tail);
  397. if (!p)
  398. return -EBUSY;
  399. memset(p, 0, sizeof(*p));
  400. p->type = data_pkt->type;
  401. p->stype = LDC_NACK;
  402. p->ctrl = data_pkt->ctrl & LDC_CTRL_MSK;
  403. p->seqid = lp->snd_nxt + 1;
  404. p->u.r.ackid = lp->rcv_nxt;
  405. ldcdbg(HS, "SEND DATA NACK type[0x%x] ctl[0x%x] seq[0x%x] ack[0x%x]\n",
  406. p->type, p->ctrl, p->seqid, p->u.r.ackid);
  407. err = send_tx_packet(lp, p, new_tail);
  408. if (!err)
  409. lp->snd_nxt++;
  410. return err;
  411. }
  412. static int ldc_abort(struct ldc_channel *lp)
  413. {
  414. unsigned long hv_err;
  415. ldcdbg(STATE, "ABORT\n");
  416. /* We report but do not act upon the hypervisor errors because
  417. * there really isn't much we can do if they fail at this point.
  418. */
  419. hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
  420. if (hv_err)
  421. printk(KERN_ERR PFX "ldc_abort: "
  422. "sun4v_ldc_tx_qconf(%lx,%lx,%lx) failed, err=%lu\n",
  423. lp->id, lp->tx_ra, lp->tx_num_entries, hv_err);
  424. hv_err = sun4v_ldc_tx_get_state(lp->id,
  425. &lp->tx_head,
  426. &lp->tx_tail,
  427. &lp->chan_state);
  428. if (hv_err)
  429. printk(KERN_ERR PFX "ldc_abort: "
  430. "sun4v_ldc_tx_get_state(%lx,...) failed, err=%lu\n",
  431. lp->id, hv_err);
  432. hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
  433. if (hv_err)
  434. printk(KERN_ERR PFX "ldc_abort: "
  435. "sun4v_ldc_rx_qconf(%lx,%lx,%lx) failed, err=%lu\n",
  436. lp->id, lp->rx_ra, lp->rx_num_entries, hv_err);
  437. /* Refetch the RX queue state as well, because we could be invoked
  438. * here in the queue processing context.
  439. */
  440. hv_err = sun4v_ldc_rx_get_state(lp->id,
  441. &lp->rx_head,
  442. &lp->rx_tail,
  443. &lp->chan_state);
  444. if (hv_err)
  445. printk(KERN_ERR PFX "ldc_abort: "
  446. "sun4v_ldc_rx_get_state(%lx,...) failed, err=%lu\n",
  447. lp->id, hv_err);
  448. return -ECONNRESET;
  449. }
  450. static struct ldc_version *find_by_major(u16 major)
  451. {
  452. struct ldc_version *ret = NULL;
  453. int i;
  454. for (i = 0; i < ARRAY_SIZE(ver_arr); i++) {
  455. struct ldc_version *v = &ver_arr[i];
  456. if (v->major <= major) {
  457. ret = v;
  458. break;
  459. }
  460. }
  461. return ret;
  462. }
  463. static int process_ver_info(struct ldc_channel *lp, struct ldc_version *vp)
  464. {
  465. struct ldc_version *vap;
  466. int err;
  467. ldcdbg(HS, "GOT VERSION INFO major[%x] minor[%x]\n",
  468. vp->major, vp->minor);
  469. if (lp->hs_state == LDC_HS_GOTVERS) {
  470. lp->hs_state = LDC_HS_OPEN;
  471. memset(&lp->ver, 0, sizeof(lp->ver));
  472. }
  473. vap = find_by_major(vp->major);
  474. if (!vap) {
  475. err = send_version_nack(lp, 0, 0);
  476. } else if (vap->major != vp->major) {
  477. err = send_version_nack(lp, vap->major, vap->minor);
  478. } else {
  479. struct ldc_version ver = *vp;
  480. if (ver.minor > vap->minor)
  481. ver.minor = vap->minor;
  482. err = send_version_ack(lp, &ver);
  483. if (!err) {
  484. lp->ver = ver;
  485. lp->hs_state = LDC_HS_GOTVERS;
  486. }
  487. }
  488. if (err)
  489. return ldc_abort(lp);
  490. return 0;
  491. }
  492. static int process_ver_ack(struct ldc_channel *lp, struct ldc_version *vp)
  493. {
  494. ldcdbg(HS, "GOT VERSION ACK major[%x] minor[%x]\n",
  495. vp->major, vp->minor);
  496. if (lp->hs_state == LDC_HS_GOTVERS) {
  497. if (lp->ver.major != vp->major ||
  498. lp->ver.minor != vp->minor)
  499. return ldc_abort(lp);
  500. } else {
  501. lp->ver = *vp;
  502. lp->hs_state = LDC_HS_GOTVERS;
  503. }
  504. if (send_rts(lp))
  505. return ldc_abort(lp);
  506. return 0;
  507. }
  508. static int process_ver_nack(struct ldc_channel *lp, struct ldc_version *vp)
  509. {
  510. struct ldc_version *vap;
  511. struct ldc_packet *p;
  512. unsigned long new_tail;
  513. if (vp->major == 0 && vp->minor == 0)
  514. return ldc_abort(lp);
  515. vap = find_by_major(vp->major);
  516. if (!vap)
  517. return ldc_abort(lp);
  518. p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
  519. vap, sizeof(*vap),
  520. &new_tail);
  521. if (!p)
  522. return ldc_abort(lp);
  523. return send_tx_packet(lp, p, new_tail);
  524. }
  525. static int process_version(struct ldc_channel *lp,
  526. struct ldc_packet *p)
  527. {
  528. struct ldc_version *vp;
  529. vp = (struct ldc_version *) p->u.u_data;
  530. switch (p->stype) {
  531. case LDC_INFO:
  532. return process_ver_info(lp, vp);
  533. case LDC_ACK:
  534. return process_ver_ack(lp, vp);
  535. case LDC_NACK:
  536. return process_ver_nack(lp, vp);
  537. default:
  538. return ldc_abort(lp);
  539. }
  540. }
  541. static int process_rts(struct ldc_channel *lp,
  542. struct ldc_packet *p)
  543. {
  544. ldcdbg(HS, "GOT RTS stype[%x] seqid[%x] env[%x]\n",
  545. p->stype, p->seqid, p->env);
  546. if (p->stype != LDC_INFO ||
  547. lp->hs_state != LDC_HS_GOTVERS ||
  548. p->env != lp->cfg.mode)
  549. return ldc_abort(lp);
  550. lp->snd_nxt = p->seqid;
  551. lp->rcv_nxt = p->seqid;
  552. lp->hs_state = LDC_HS_SENTRTR;
  553. if (send_rtr(lp))
  554. return ldc_abort(lp);
  555. return 0;
  556. }
  557. static int process_rtr(struct ldc_channel *lp,
  558. struct ldc_packet *p)
  559. {
  560. ldcdbg(HS, "GOT RTR stype[%x] seqid[%x] env[%x]\n",
  561. p->stype, p->seqid, p->env);
  562. if (p->stype != LDC_INFO ||
  563. p->env != lp->cfg.mode)
  564. return ldc_abort(lp);
  565. lp->snd_nxt = p->seqid;
  566. lp->hs_state = LDC_HS_COMPLETE;
  567. ldc_set_state(lp, LDC_STATE_CONNECTED);
  568. send_rdx(lp);
  569. return LDC_EVENT_UP;
  570. }
  571. static int rx_seq_ok(struct ldc_channel *lp, u32 seqid)
  572. {
  573. return lp->rcv_nxt + 1 == seqid;
  574. }
  575. static int process_rdx(struct ldc_channel *lp,
  576. struct ldc_packet *p)
  577. {
  578. ldcdbg(HS, "GOT RDX stype[%x] seqid[%x] env[%x] ackid[%x]\n",
  579. p->stype, p->seqid, p->env, p->u.r.ackid);
  580. if (p->stype != LDC_INFO ||
  581. !(rx_seq_ok(lp, p->seqid)))
  582. return ldc_abort(lp);
  583. lp->rcv_nxt = p->seqid;
  584. lp->hs_state = LDC_HS_COMPLETE;
  585. ldc_set_state(lp, LDC_STATE_CONNECTED);
  586. return LDC_EVENT_UP;
  587. }
  588. static int process_control_frame(struct ldc_channel *lp,
  589. struct ldc_packet *p)
  590. {
  591. switch (p->ctrl) {
  592. case LDC_VERS:
  593. return process_version(lp, p);
  594. case LDC_RTS:
  595. return process_rts(lp, p);
  596. case LDC_RTR:
  597. return process_rtr(lp, p);
  598. case LDC_RDX:
  599. return process_rdx(lp, p);
  600. default:
  601. return ldc_abort(lp);
  602. }
  603. }
  604. static int process_error_frame(struct ldc_channel *lp,
  605. struct ldc_packet *p)
  606. {
  607. return ldc_abort(lp);
  608. }
  609. static int process_data_ack(struct ldc_channel *lp,
  610. struct ldc_packet *ack)
  611. {
  612. unsigned long head = lp->tx_acked;
  613. u32 ackid = ack->u.r.ackid;
  614. while (1) {
  615. struct ldc_packet *p = lp->tx_base + (head / LDC_PACKET_SIZE);
  616. head = tx_advance(lp, head);
  617. if (p->seqid == ackid) {
  618. lp->tx_acked = head;
  619. return 0;
  620. }
  621. if (head == lp->tx_tail)
  622. return ldc_abort(lp);
  623. }
  624. return 0;
  625. }
  626. static void send_events(struct ldc_channel *lp, unsigned int event_mask)
  627. {
  628. if (event_mask & LDC_EVENT_RESET)
  629. lp->cfg.event(lp->event_arg, LDC_EVENT_RESET);
  630. if (event_mask & LDC_EVENT_UP)
  631. lp->cfg.event(lp->event_arg, LDC_EVENT_UP);
  632. if (event_mask & LDC_EVENT_DATA_READY)
  633. lp->cfg.event(lp->event_arg, LDC_EVENT_DATA_READY);
  634. }
  635. static irqreturn_t ldc_rx(int irq, void *dev_id)
  636. {
  637. struct ldc_channel *lp = dev_id;
  638. unsigned long orig_state, flags;
  639. unsigned int event_mask;
  640. spin_lock_irqsave(&lp->lock, flags);
  641. orig_state = lp->chan_state;
  642. /* We should probably check for hypervisor errors here and
  643. * reset the LDC channel if we get one.
  644. */
  645. sun4v_ldc_rx_get_state(lp->id,
  646. &lp->rx_head,
  647. &lp->rx_tail,
  648. &lp->chan_state);
  649. ldcdbg(RX, "RX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
  650. orig_state, lp->chan_state, lp->rx_head, lp->rx_tail);
  651. event_mask = 0;
  652. if (lp->cfg.mode == LDC_MODE_RAW &&
  653. lp->chan_state == LDC_CHANNEL_UP) {
  654. lp->hs_state = LDC_HS_COMPLETE;
  655. ldc_set_state(lp, LDC_STATE_CONNECTED);
  656. event_mask |= LDC_EVENT_UP;
  657. orig_state = lp->chan_state;
  658. }
  659. /* If we are in reset state, flush the RX queue and ignore
  660. * everything.
  661. */
  662. if (lp->flags & LDC_FLAG_RESET) {
  663. (void) __set_rx_head(lp, lp->rx_tail);
  664. goto out;
  665. }
  666. /* Once we finish the handshake, we let the ldc_read()
  667. * paths do all of the control frame and state management.
  668. * Just trigger the callback.
  669. */
  670. if (lp->hs_state == LDC_HS_COMPLETE) {
  671. handshake_complete:
  672. if (lp->chan_state != orig_state) {
  673. unsigned int event = LDC_EVENT_RESET;
  674. if (lp->chan_state == LDC_CHANNEL_UP)
  675. event = LDC_EVENT_UP;
  676. event_mask |= event;
  677. }
  678. if (lp->rx_head != lp->rx_tail)
  679. event_mask |= LDC_EVENT_DATA_READY;
  680. goto out;
  681. }
  682. if (lp->chan_state != orig_state)
  683. goto out;
  684. while (lp->rx_head != lp->rx_tail) {
  685. struct ldc_packet *p;
  686. unsigned long new;
  687. int err;
  688. p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE);
  689. switch (p->type) {
  690. case LDC_CTRL:
  691. err = process_control_frame(lp, p);
  692. if (err > 0)
  693. event_mask |= err;
  694. break;
  695. case LDC_DATA:
  696. event_mask |= LDC_EVENT_DATA_READY;
  697. err = 0;
  698. break;
  699. case LDC_ERR:
  700. err = process_error_frame(lp, p);
  701. break;
  702. default:
  703. err = ldc_abort(lp);
  704. break;
  705. }
  706. if (err < 0)
  707. break;
  708. new = lp->rx_head;
  709. new += LDC_PACKET_SIZE;
  710. if (new == (lp->rx_num_entries * LDC_PACKET_SIZE))
  711. new = 0;
  712. lp->rx_head = new;
  713. err = __set_rx_head(lp, new);
  714. if (err < 0) {
  715. (void) ldc_abort(lp);
  716. break;
  717. }
  718. if (lp->hs_state == LDC_HS_COMPLETE)
  719. goto handshake_complete;
  720. }
  721. out:
  722. spin_unlock_irqrestore(&lp->lock, flags);
  723. send_events(lp, event_mask);
  724. return IRQ_HANDLED;
  725. }
  726. static irqreturn_t ldc_tx(int irq, void *dev_id)
  727. {
  728. struct ldc_channel *lp = dev_id;
  729. unsigned long flags, orig_state;
  730. unsigned int event_mask = 0;
  731. spin_lock_irqsave(&lp->lock, flags);
  732. orig_state = lp->chan_state;
  733. /* We should probably check for hypervisor errors here and
  734. * reset the LDC channel if we get one.
  735. */
  736. sun4v_ldc_tx_get_state(lp->id,
  737. &lp->tx_head,
  738. &lp->tx_tail,
  739. &lp->chan_state);
  740. ldcdbg(TX, " TX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
  741. orig_state, lp->chan_state, lp->tx_head, lp->tx_tail);
  742. if (lp->cfg.mode == LDC_MODE_RAW &&
  743. lp->chan_state == LDC_CHANNEL_UP) {
  744. lp->hs_state = LDC_HS_COMPLETE;
  745. ldc_set_state(lp, LDC_STATE_CONNECTED);
  746. event_mask |= LDC_EVENT_UP;
  747. }
  748. spin_unlock_irqrestore(&lp->lock, flags);
  749. send_events(lp, event_mask);
  750. return IRQ_HANDLED;
  751. }
  752. /* XXX ldc_alloc() and ldc_free() needs to run under a mutex so
  753. * XXX that addition and removal from the ldc_channel_list has
  754. * XXX atomicity, otherwise the __ldc_channel_exists() check is
  755. * XXX totally pointless as another thread can slip into ldc_alloc()
  756. * XXX and add a channel with the same ID. There also needs to be
  757. * XXX a spinlock for ldc_channel_list.
  758. */
  759. static HLIST_HEAD(ldc_channel_list);
  760. static int __ldc_channel_exists(unsigned long id)
  761. {
  762. struct ldc_channel *lp;
  763. struct hlist_node *n;
  764. hlist_for_each_entry(lp, n, &ldc_channel_list, list) {
  765. if (lp->id == id)
  766. return 1;
  767. }
  768. return 0;
  769. }
  770. static int alloc_queue(const char *name, unsigned long num_entries,
  771. struct ldc_packet **base, unsigned long *ra)
  772. {
  773. unsigned long size, order;
  774. void *q;
  775. size = num_entries * LDC_PACKET_SIZE;
  776. order = get_order(size);
  777. q = (void *) __get_free_pages(GFP_KERNEL, order);
  778. if (!q) {
  779. printk(KERN_ERR PFX "Alloc of %s queue failed with "
  780. "size=%lu order=%lu\n", name, size, order);
  781. return -ENOMEM;
  782. }
  783. memset(q, 0, PAGE_SIZE << order);
  784. *base = q;
  785. *ra = __pa(q);
  786. return 0;
  787. }
  788. static void free_queue(unsigned long num_entries, struct ldc_packet *q)
  789. {
  790. unsigned long size, order;
  791. if (!q)
  792. return;
  793. size = num_entries * LDC_PACKET_SIZE;
  794. order = get_order(size);
  795. free_pages((unsigned long)q, order);
  796. }
  797. /* XXX Make this configurable... XXX */
  798. #define LDC_IOTABLE_SIZE (8 * 1024)
  799. static int ldc_iommu_init(struct ldc_channel *lp)
  800. {
  801. unsigned long sz, num_tsb_entries, tsbsize, order;
  802. struct ldc_iommu *iommu = &lp->iommu;
  803. struct ldc_mtable_entry *table;
  804. unsigned long hv_err;
  805. int err;
  806. num_tsb_entries = LDC_IOTABLE_SIZE;
  807. tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
  808. spin_lock_init(&iommu->lock);
  809. sz = num_tsb_entries / 8;
  810. sz = (sz + 7UL) & ~7UL;
  811. iommu->arena.map = kzalloc(sz, GFP_KERNEL);
  812. if (!iommu->arena.map) {
  813. printk(KERN_ERR PFX "Alloc of arena map failed, sz=%lu\n", sz);
  814. return -ENOMEM;
  815. }
  816. iommu->arena.limit = num_tsb_entries;
  817. order = get_order(tsbsize);
  818. table = (struct ldc_mtable_entry *)
  819. __get_free_pages(GFP_KERNEL, order);
  820. err = -ENOMEM;
  821. if (!table) {
  822. printk(KERN_ERR PFX "Alloc of MTE table failed, "
  823. "size=%lu order=%lu\n", tsbsize, order);
  824. goto out_free_map;
  825. }
  826. memset(table, 0, PAGE_SIZE << order);
  827. iommu->page_table = table;
  828. hv_err = sun4v_ldc_set_map_table(lp->id, __pa(table),
  829. num_tsb_entries);
  830. err = -EINVAL;
  831. if (hv_err)
  832. goto out_free_table;
  833. return 0;
  834. out_free_table:
  835. free_pages((unsigned long) table, order);
  836. iommu->page_table = NULL;
  837. out_free_map:
  838. kfree(iommu->arena.map);
  839. iommu->arena.map = NULL;
  840. return err;
  841. }
  842. static void ldc_iommu_release(struct ldc_channel *lp)
  843. {
  844. struct ldc_iommu *iommu = &lp->iommu;
  845. unsigned long num_tsb_entries, tsbsize, order;
  846. (void) sun4v_ldc_set_map_table(lp->id, 0, 0);
  847. num_tsb_entries = iommu->arena.limit;
  848. tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
  849. order = get_order(tsbsize);
  850. free_pages((unsigned long) iommu->page_table, order);
  851. iommu->page_table = NULL;
  852. kfree(iommu->arena.map);
  853. iommu->arena.map = NULL;
  854. }
  855. struct ldc_channel *ldc_alloc(unsigned long id,
  856. const struct ldc_channel_config *cfgp,
  857. void *event_arg)
  858. {
  859. struct ldc_channel *lp;
  860. const struct ldc_mode_ops *mops;
  861. unsigned long dummy1, dummy2, hv_err;
  862. u8 mss, *mssbuf;
  863. int err;
  864. err = -ENODEV;
  865. if (!ldom_domaining_enabled)
  866. goto out_err;
  867. err = -EINVAL;
  868. if (!cfgp)
  869. goto out_err;
  870. switch (cfgp->mode) {
  871. case LDC_MODE_RAW:
  872. mops = &raw_ops;
  873. mss = LDC_PACKET_SIZE;
  874. break;
  875. case LDC_MODE_UNRELIABLE:
  876. mops = &nonraw_ops;
  877. mss = LDC_PACKET_SIZE - 8;
  878. break;
  879. case LDC_MODE_STREAM:
  880. mops = &stream_ops;
  881. mss = LDC_PACKET_SIZE - 8 - 8;
  882. break;
  883. default:
  884. goto out_err;
  885. }
  886. if (!cfgp->event || !event_arg || !cfgp->rx_irq || !cfgp->tx_irq)
  887. goto out_err;
  888. hv_err = sun4v_ldc_tx_qinfo(id, &dummy1, &dummy2);
  889. err = -ENODEV;
  890. if (hv_err == HV_ECHANNEL)
  891. goto out_err;
  892. err = -EEXIST;
  893. if (__ldc_channel_exists(id))
  894. goto out_err;
  895. mssbuf = NULL;
  896. lp = kzalloc(sizeof(*lp), GFP_KERNEL);
  897. err = -ENOMEM;
  898. if (!lp)
  899. goto out_err;
  900. spin_lock_init(&lp->lock);
  901. lp->id = id;
  902. err = ldc_iommu_init(lp);
  903. if (err)
  904. goto out_free_ldc;
  905. lp->mops = mops;
  906. lp->mss = mss;
  907. lp->cfg = *cfgp;
  908. if (!lp->cfg.mtu)
  909. lp->cfg.mtu = LDC_DEFAULT_MTU;
  910. if (lp->cfg.mode == LDC_MODE_STREAM) {
  911. mssbuf = kzalloc(lp->cfg.mtu, GFP_KERNEL);
  912. if (!mssbuf) {
  913. err = -ENOMEM;
  914. goto out_free_iommu;
  915. }
  916. lp->mssbuf = mssbuf;
  917. }
  918. lp->event_arg = event_arg;
  919. /* XXX allow setting via ldc_channel_config to override defaults
  920. * XXX or use some formula based upon mtu
  921. */
  922. lp->tx_num_entries = LDC_DEFAULT_NUM_ENTRIES;
  923. lp->rx_num_entries = LDC_DEFAULT_NUM_ENTRIES;
  924. err = alloc_queue("TX", lp->tx_num_entries,
  925. &lp->tx_base, &lp->tx_ra);
  926. if (err)
  927. goto out_free_mssbuf;
  928. err = alloc_queue("RX", lp->rx_num_entries,
  929. &lp->rx_base, &lp->rx_ra);
  930. if (err)
  931. goto out_free_txq;
  932. lp->flags |= LDC_FLAG_ALLOCED_QUEUES;
  933. lp->hs_state = LDC_HS_CLOSED;
  934. ldc_set_state(lp, LDC_STATE_INIT);
  935. INIT_HLIST_NODE(&lp->list);
  936. hlist_add_head(&lp->list, &ldc_channel_list);
  937. INIT_HLIST_HEAD(&lp->mh_list);
  938. return lp;
  939. out_free_txq:
  940. free_queue(lp->tx_num_entries, lp->tx_base);
  941. out_free_mssbuf:
  942. kfree(mssbuf);
  943. out_free_iommu:
  944. ldc_iommu_release(lp);
  945. out_free_ldc:
  946. kfree(lp);
  947. out_err:
  948. return ERR_PTR(err);
  949. }
  950. EXPORT_SYMBOL(ldc_alloc);
  951. void ldc_free(struct ldc_channel *lp)
  952. {
  953. if (lp->flags & LDC_FLAG_REGISTERED_IRQS) {
  954. free_irq(lp->cfg.rx_irq, lp);
  955. free_irq(lp->cfg.tx_irq, lp);
  956. }
  957. if (lp->flags & LDC_FLAG_REGISTERED_QUEUES) {
  958. sun4v_ldc_tx_qconf(lp->id, 0, 0);
  959. sun4v_ldc_rx_qconf(lp->id, 0, 0);
  960. lp->flags &= ~LDC_FLAG_REGISTERED_QUEUES;
  961. }
  962. if (lp->flags & LDC_FLAG_ALLOCED_QUEUES) {
  963. free_queue(lp->tx_num_entries, lp->tx_base);
  964. free_queue(lp->rx_num_entries, lp->rx_base);
  965. lp->flags &= ~LDC_FLAG_ALLOCED_QUEUES;
  966. }
  967. hlist_del(&lp->list);
  968. kfree(lp->mssbuf);
  969. ldc_iommu_release(lp);
  970. kfree(lp);
  971. }
  972. EXPORT_SYMBOL(ldc_free);
  973. /* Bind the channel. This registers the LDC queues with
  974. * the hypervisor and puts the channel into a pseudo-listening
  975. * state. This does not initiate a handshake, ldc_connect() does
  976. * that.
  977. */
  978. int ldc_bind(struct ldc_channel *lp, const char *name)
  979. {
  980. unsigned long hv_err, flags;
  981. int err = -EINVAL;
  982. if (!name ||
  983. (lp->state != LDC_STATE_INIT))
  984. return -EINVAL;
  985. snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
  986. snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
  987. err = request_irq(lp->cfg.rx_irq, ldc_rx,
  988. IRQF_SAMPLE_RANDOM | IRQF_DISABLED,
  989. lp->rx_irq_name, lp);
  990. if (err)
  991. return err;
  992. err = request_irq(lp->cfg.tx_irq, ldc_tx,
  993. IRQF_SAMPLE_RANDOM | IRQF_DISABLED,
  994. lp->tx_irq_name, lp);
  995. if (err) {
  996. free_irq(lp->cfg.rx_irq, lp);
  997. return err;
  998. }
  999. spin_lock_irqsave(&lp->lock, flags);
  1000. enable_irq(lp->cfg.rx_irq);
  1001. enable_irq(lp->cfg.tx_irq);
  1002. lp->flags |= LDC_FLAG_REGISTERED_IRQS;
  1003. err = -ENODEV;
  1004. hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0);
  1005. if (hv_err)
  1006. goto out_free_irqs;
  1007. hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
  1008. if (hv_err)
  1009. goto out_free_irqs;
  1010. hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0);
  1011. if (hv_err)
  1012. goto out_unmap_tx;
  1013. hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
  1014. if (hv_err)
  1015. goto out_unmap_tx;
  1016. lp->flags |= LDC_FLAG_REGISTERED_QUEUES;
  1017. hv_err = sun4v_ldc_tx_get_state(lp->id,
  1018. &lp->tx_head,
  1019. &lp->tx_tail,
  1020. &lp->chan_state);
  1021. err = -EBUSY;
  1022. if (hv_err)
  1023. goto out_unmap_rx;
  1024. lp->tx_acked = lp->tx_head;
  1025. lp->hs_state = LDC_HS_OPEN;
  1026. ldc_set_state(lp, LDC_STATE_BOUND);
  1027. spin_unlock_irqrestore(&lp->lock, flags);
  1028. return 0;
  1029. out_unmap_rx:
  1030. lp->flags &= ~LDC_FLAG_REGISTERED_QUEUES;
  1031. sun4v_ldc_rx_qconf(lp->id, 0, 0);
  1032. out_unmap_tx:
  1033. sun4v_ldc_tx_qconf(lp->id, 0, 0);
  1034. out_free_irqs:
  1035. lp->flags &= ~LDC_FLAG_REGISTERED_IRQS;
  1036. free_irq(lp->cfg.tx_irq, lp);
  1037. free_irq(lp->cfg.rx_irq, lp);
  1038. spin_unlock_irqrestore(&lp->lock, flags);
  1039. return err;
  1040. }
  1041. EXPORT_SYMBOL(ldc_bind);
  1042. int ldc_connect(struct ldc_channel *lp)
  1043. {
  1044. unsigned long flags;
  1045. int err;
  1046. if (lp->cfg.mode == LDC_MODE_RAW)
  1047. return -EINVAL;
  1048. spin_lock_irqsave(&lp->lock, flags);
  1049. if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
  1050. !(lp->flags & LDC_FLAG_REGISTERED_QUEUES) ||
  1051. lp->hs_state != LDC_HS_OPEN)
  1052. err = -EINVAL;
  1053. else
  1054. err = start_handshake(lp);
  1055. spin_unlock_irqrestore(&lp->lock, flags);
  1056. return err;
  1057. }
  1058. EXPORT_SYMBOL(ldc_connect);
  1059. int ldc_disconnect(struct ldc_channel *lp)
  1060. {
  1061. unsigned long hv_err, flags;
  1062. int err;
  1063. if (lp->cfg.mode == LDC_MODE_RAW)
  1064. return -EINVAL;
  1065. if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
  1066. !(lp->flags & LDC_FLAG_REGISTERED_QUEUES))
  1067. return -EINVAL;
  1068. spin_lock_irqsave(&lp->lock, flags);
  1069. err = -ENODEV;
  1070. hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0);
  1071. if (hv_err)
  1072. goto out_err;
  1073. hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
  1074. if (hv_err)
  1075. goto out_err;
  1076. hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0);
  1077. if (hv_err)
  1078. goto out_err;
  1079. hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
  1080. if (hv_err)
  1081. goto out_err;
  1082. ldc_set_state(lp, LDC_STATE_BOUND);
  1083. lp->hs_state = LDC_HS_OPEN;
  1084. lp->flags |= LDC_FLAG_RESET;
  1085. spin_unlock_irqrestore(&lp->lock, flags);
  1086. return 0;
  1087. out_err:
  1088. sun4v_ldc_tx_qconf(lp->id, 0, 0);
  1089. sun4v_ldc_rx_qconf(lp->id, 0, 0);
  1090. free_irq(lp->cfg.tx_irq, lp);
  1091. free_irq(lp->cfg.rx_irq, lp);
  1092. lp->flags &= ~(LDC_FLAG_REGISTERED_IRQS |
  1093. LDC_FLAG_REGISTERED_QUEUES);
  1094. ldc_set_state(lp, LDC_STATE_INIT);
  1095. spin_unlock_irqrestore(&lp->lock, flags);
  1096. return err;
  1097. }
  1098. EXPORT_SYMBOL(ldc_disconnect);
  1099. int ldc_state(struct ldc_channel *lp)
  1100. {
  1101. return lp->state;
  1102. }
  1103. EXPORT_SYMBOL(ldc_state);
  1104. static int write_raw(struct ldc_channel *lp, const void *buf, unsigned int size)
  1105. {
  1106. struct ldc_packet *p;
  1107. unsigned long new_tail;
  1108. int err;
  1109. if (size > LDC_PACKET_SIZE)
  1110. return -EMSGSIZE;
  1111. p = data_get_tx_packet(lp, &new_tail);
  1112. if (!p)
  1113. return -EAGAIN;
  1114. memcpy(p, buf, size);
  1115. err = send_tx_packet(lp, p, new_tail);
  1116. if (!err)
  1117. err = size;
  1118. return err;
  1119. }
  1120. static int read_raw(struct ldc_channel *lp, void *buf, unsigned int size)
  1121. {
  1122. struct ldc_packet *p;
  1123. unsigned long hv_err, new;
  1124. int err;
  1125. if (size < LDC_PACKET_SIZE)
  1126. return -EINVAL;
  1127. hv_err = sun4v_ldc_rx_get_state(lp->id,
  1128. &lp->rx_head,
  1129. &lp->rx_tail,
  1130. &lp->chan_state);
  1131. if (hv_err)
  1132. return ldc_abort(lp);
  1133. if (lp->chan_state == LDC_CHANNEL_DOWN ||
  1134. lp->chan_state == LDC_CHANNEL_RESETTING)
  1135. return -ECONNRESET;
  1136. if (lp->rx_head == lp->rx_tail)
  1137. return 0;
  1138. p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE);
  1139. memcpy(buf, p, LDC_PACKET_SIZE);
  1140. new = rx_advance(lp, lp->rx_head);
  1141. lp->rx_head = new;
  1142. err = __set_rx_head(lp, new);
  1143. if (err < 0)
  1144. err = -ECONNRESET;
  1145. else
  1146. err = LDC_PACKET_SIZE;
  1147. return err;
  1148. }
  1149. static const struct ldc_mode_ops raw_ops = {
  1150. .write = write_raw,
  1151. .read = read_raw,
  1152. };
  1153. static int write_nonraw(struct ldc_channel *lp, const void *buf,
  1154. unsigned int size)
  1155. {
  1156. unsigned long hv_err, tail;
  1157. unsigned int copied;
  1158. u32 seq;
  1159. int err;
  1160. hv_err = sun4v_ldc_tx_get_state(lp->id, &lp->tx_head, &lp->tx_tail,
  1161. &lp->chan_state);
  1162. if (unlikely(hv_err))
  1163. return -EBUSY;
  1164. if (unlikely(lp->chan_state != LDC_CHANNEL_UP))
  1165. return ldc_abort(lp);
  1166. if (!tx_has_space_for(lp, size))
  1167. return -EAGAIN;
  1168. seq = lp->snd_nxt;
  1169. copied = 0;
  1170. tail = lp->tx_tail;
  1171. while (copied < size) {
  1172. struct ldc_packet *p = lp->tx_base + (tail / LDC_PACKET_SIZE);
  1173. u8 *data = ((lp->cfg.mode == LDC_MODE_UNRELIABLE) ?
  1174. p->u.u_data :
  1175. p->u.r.r_data);
  1176. int data_len;
  1177. p->type = LDC_DATA;
  1178. p->stype = LDC_INFO;
  1179. p->ctrl = 0;
  1180. data_len = size - copied;
  1181. if (data_len > lp->mss)
  1182. data_len = lp->mss;
  1183. BUG_ON(data_len > LDC_LEN);
  1184. p->env = (data_len |
  1185. (copied == 0 ? LDC_START : 0) |
  1186. (data_len == size - copied ? LDC_STOP : 0));
  1187. p->seqid = ++seq;
  1188. ldcdbg(DATA, "SENT DATA [%02x:%02x:%02x:%02x:%08x]\n",
  1189. p->type,
  1190. p->stype,
  1191. p->ctrl,
  1192. p->env,
  1193. p->seqid);
  1194. memcpy(data, buf, data_len);
  1195. buf += data_len;
  1196. copied += data_len;
  1197. tail = tx_advance(lp, tail);
  1198. }
  1199. err = set_tx_tail(lp, tail);
  1200. if (!err) {
  1201. lp->snd_nxt = seq;
  1202. err = size;
  1203. }
  1204. return err;
  1205. }
  1206. static int rx_bad_seq(struct ldc_channel *lp, struct ldc_packet *p,
  1207. struct ldc_packet *first_frag)
  1208. {
  1209. int err;
  1210. if (first_frag)
  1211. lp->rcv_nxt = first_frag->seqid - 1;
  1212. err = send_data_nack(lp, p);
  1213. if (err)
  1214. return err;
  1215. err = __set_rx_head(lp, lp->rx_tail);
  1216. if (err < 0)
  1217. return ldc_abort(lp);
  1218. return 0;
  1219. }
  1220. static int data_ack_nack(struct ldc_channel *lp, struct ldc_packet *p)
  1221. {
  1222. if (p->stype & LDC_ACK) {
  1223. int err = process_data_ack(lp, p);
  1224. if (err)
  1225. return err;
  1226. }
  1227. if (p->stype & LDC_NACK)
  1228. return ldc_abort(lp);
  1229. return 0;
  1230. }
  1231. static int rx_data_wait(struct ldc_channel *lp, unsigned long cur_head)
  1232. {
  1233. unsigned long dummy;
  1234. int limit = 1000;
  1235. ldcdbg(DATA, "DATA WAIT cur_head[%lx] rx_head[%lx] rx_tail[%lx]\n",
  1236. cur_head, lp->rx_head, lp->rx_tail);
  1237. while (limit-- > 0) {
  1238. unsigned long hv_err;
  1239. hv_err = sun4v_ldc_rx_get_state(lp->id,
  1240. &dummy,
  1241. &lp->rx_tail,
  1242. &lp->chan_state);
  1243. if (hv_err)
  1244. return ldc_abort(lp);
  1245. if (lp->chan_state == LDC_CHANNEL_DOWN ||
  1246. lp->chan_state == LDC_CHANNEL_RESETTING)
  1247. return -ECONNRESET;
  1248. if (cur_head != lp->rx_tail) {
  1249. ldcdbg(DATA, "DATA WAIT DONE "
  1250. "head[%lx] tail[%lx] chan_state[%lx]\n",
  1251. dummy, lp->rx_tail, lp->chan_state);
  1252. return 0;
  1253. }
  1254. udelay(1);
  1255. }
  1256. return -EAGAIN;
  1257. }
  1258. static int rx_set_head(struct ldc_channel *lp, unsigned long head)
  1259. {
  1260. int err = __set_rx_head(lp, head);
  1261. if (err < 0)
  1262. return ldc_abort(lp);
  1263. lp->rx_head = head;
  1264. return 0;
  1265. }
  1266. static void send_data_ack(struct ldc_channel *lp)
  1267. {
  1268. unsigned long new_tail;
  1269. struct ldc_packet *p;
  1270. p = data_get_tx_packet(lp, &new_tail);
  1271. if (likely(p)) {
  1272. int err;
  1273. memset(p, 0, sizeof(*p));
  1274. p->type = LDC_DATA;
  1275. p->stype = LDC_ACK;
  1276. p->ctrl = 0;
  1277. p->seqid = lp->snd_nxt + 1;
  1278. p->u.r.ackid = lp->rcv_nxt;
  1279. err = send_tx_packet(lp, p, new_tail);
  1280. if (!err)
  1281. lp->snd_nxt++;
  1282. }
  1283. }
  1284. static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size)
  1285. {
  1286. struct ldc_packet *first_frag;
  1287. unsigned long hv_err, new;
  1288. int err, copied;
  1289. hv_err = sun4v_ldc_rx_get_state(lp->id,
  1290. &lp->rx_head,
  1291. &lp->rx_tail,
  1292. &lp->chan_state);
  1293. if (hv_err)
  1294. return ldc_abort(lp);
  1295. if (lp->chan_state == LDC_CHANNEL_DOWN ||
  1296. lp->chan_state == LDC_CHANNEL_RESETTING)
  1297. return -ECONNRESET;
  1298. if (lp->rx_head == lp->rx_tail)
  1299. return 0;
  1300. first_frag = NULL;
  1301. copied = err = 0;
  1302. new = lp->rx_head;
  1303. while (1) {
  1304. struct ldc_packet *p;
  1305. int pkt_len;
  1306. BUG_ON(new == lp->rx_tail);
  1307. p = lp->rx_base + (new / LDC_PACKET_SIZE);
  1308. ldcdbg(RX, "RX read pkt[%02x:%02x:%02x:%02x:%08x:%08x] "
  1309. "rcv_nxt[%08x]\n",
  1310. p->type,
  1311. p->stype,
  1312. p->ctrl,
  1313. p->env,
  1314. p->seqid,
  1315. p->u.r.ackid,
  1316. lp->rcv_nxt);
  1317. if (unlikely(!rx_seq_ok(lp, p->seqid))) {
  1318. err = rx_bad_seq(lp, p, first_frag);
  1319. copied = 0;
  1320. break;
  1321. }
  1322. if (p->type & LDC_CTRL) {
  1323. err = process_control_frame(lp, p);
  1324. if (err < 0)
  1325. break;
  1326. err = 0;
  1327. }
  1328. lp->rcv_nxt = p->seqid;
  1329. if (!(p->type & LDC_DATA)) {
  1330. new = rx_advance(lp, new);
  1331. goto no_data;
  1332. }
  1333. if (p->stype & (LDC_ACK | LDC_NACK)) {
  1334. err = data_ack_nack(lp, p);
  1335. if (err)
  1336. break;
  1337. }
  1338. if (!(p->stype & LDC_INFO)) {
  1339. new = rx_advance(lp, new);
  1340. err = rx_set_head(lp, new);
  1341. if (err)
  1342. break;
  1343. goto no_data;
  1344. }
  1345. pkt_len = p->env & LDC_LEN;
  1346. /* Every initial packet starts with the START bit set.
  1347. *
  1348. * Singleton packets will have both START+STOP set.
  1349. *
  1350. * Fragments will have START set in the first frame, STOP
  1351. * set in the last frame, and neither bit set in middle
  1352. * frames of the packet.
  1353. *
  1354. * Therefore if we are at the beginning of a packet and
  1355. * we don't see START, or we are in the middle of a fragmented
  1356. * packet and do see START, we are unsynchronized and should
  1357. * flush the RX queue.
  1358. */
  1359. if ((first_frag == NULL && !(p->env & LDC_START)) ||
  1360. (first_frag != NULL && (p->env & LDC_START))) {
  1361. if (!first_frag)
  1362. new = rx_advance(lp, new);
  1363. err = rx_set_head(lp, new);
  1364. if (err)
  1365. break;
  1366. if (!first_frag)
  1367. goto no_data;
  1368. }
  1369. if (!first_frag)
  1370. first_frag = p;
  1371. if (pkt_len > size - copied) {
  1372. /* User didn't give us a big enough buffer,
  1373. * what to do? This is a pretty serious error.
  1374. *
  1375. * Since we haven't updated the RX ring head to
  1376. * consume any of the packets, signal the error
  1377. * to the user and just leave the RX ring alone.
  1378. *
  1379. * This seems the best behavior because this allows
  1380. * a user of the LDC layer to start with a small
  1381. * RX buffer for ldc_read() calls and use -EMSGSIZE
  1382. * as a cue to enlarge it's read buffer.
  1383. */
  1384. err = -EMSGSIZE;
  1385. break;
  1386. }
  1387. /* Ok, we are gonna eat this one. */
  1388. new = rx_advance(lp, new);
  1389. memcpy(buf,
  1390. (lp->cfg.mode == LDC_MODE_UNRELIABLE ?
  1391. p->u.u_data : p->u.r.r_data), pkt_len);
  1392. buf += pkt_len;
  1393. copied += pkt_len;
  1394. if (p->env & LDC_STOP)
  1395. break;
  1396. no_data:
  1397. if (new == lp->rx_tail) {
  1398. err = rx_data_wait(lp, new);
  1399. if (err)
  1400. break;
  1401. }
  1402. }
  1403. if (!err)
  1404. err = rx_set_head(lp, new);
  1405. if (err && first_frag)
  1406. lp->rcv_nxt = first_frag->seqid - 1;
  1407. if (!err) {
  1408. err = copied;
  1409. if (err > 0 && lp->cfg.mode != LDC_MODE_UNRELIABLE)
  1410. send_data_ack(lp);
  1411. }
  1412. return err;
  1413. }
  1414. static const struct ldc_mode_ops nonraw_ops = {
  1415. .write = write_nonraw,
  1416. .read = read_nonraw,
  1417. };
  1418. static int write_stream(struct ldc_channel *lp, const void *buf,
  1419. unsigned int size)
  1420. {
  1421. if (size > lp->cfg.mtu)
  1422. size = lp->cfg.mtu;
  1423. return write_nonraw(lp, buf, size);
  1424. }
  1425. static int read_stream(struct ldc_channel *lp, void *buf, unsigned int size)
  1426. {
  1427. if (!lp->mssbuf_len) {
  1428. int err = read_nonraw(lp, lp->mssbuf, lp->cfg.mtu);
  1429. if (err < 0)
  1430. return err;
  1431. lp->mssbuf_len = err;
  1432. lp->mssbuf_off = 0;
  1433. }
  1434. if (size > lp->mssbuf_len)
  1435. size = lp->mssbuf_len;
  1436. memcpy(buf, lp->mssbuf + lp->mssbuf_off, size);
  1437. lp->mssbuf_off += size;
  1438. lp->mssbuf_len -= size;
  1439. return size;
  1440. }
  1441. static const struct ldc_mode_ops stream_ops = {
  1442. .write = write_stream,
  1443. .read = read_stream,
  1444. };
  1445. int ldc_write(struct ldc_channel *lp, const void *buf, unsigned int size)
  1446. {
  1447. unsigned long flags;
  1448. int err;
  1449. if (!buf)
  1450. return -EINVAL;
  1451. if (!size)
  1452. return 0;
  1453. spin_lock_irqsave(&lp->lock, flags);
  1454. if (lp->hs_state != LDC_HS_COMPLETE)
  1455. err = -ENOTCONN;
  1456. else
  1457. err = lp->mops->write(lp, buf, size);
  1458. spin_unlock_irqrestore(&lp->lock, flags);
  1459. return err;
  1460. }
  1461. EXPORT_SYMBOL(ldc_write);
  1462. int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size)
  1463. {
  1464. unsigned long flags;
  1465. int err;
  1466. if (!buf)
  1467. return -EINVAL;
  1468. if (!size)
  1469. return 0;
  1470. spin_lock_irqsave(&lp->lock, flags);
  1471. if (lp->hs_state != LDC_HS_COMPLETE)
  1472. err = -ENOTCONN;
  1473. else
  1474. err = lp->mops->read(lp, buf, size);
  1475. spin_unlock_irqrestore(&lp->lock, flags);
  1476. return err;
  1477. }
  1478. EXPORT_SYMBOL(ldc_read);

/* Allocate a run of "npages" contiguous entries in the channel's map table
 * arena.  The scan starts at the allocation hint and wraps around to the
 * beginning once before giving up.
 */
static long arena_alloc(struct ldc_iommu *iommu, unsigned long npages)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long n, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = bitmap_find_next_zero_area(arena->map, limit, start, npages, 0);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}
	bitmap_set(arena->map, n, npages);

	arena->hint = end;

	return n;
}

#define COOKIE_PGSZ_CODE	0xf000000000000000ULL
#define COOKIE_PGSZ_CODE_SHIFT	60ULL

static u64 pagesize_code(void)
{
	switch (PAGE_SIZE) {
	default:
	case (8ULL * 1024ULL):
		return 0;
	case (64ULL * 1024ULL):
		return 1;
	case (512ULL * 1024ULL):
		return 2;
	case (4ULL * 1024ULL * 1024ULL):
		return 3;
	case (32ULL * 1024ULL * 1024ULL):
		return 4;
	case (256ULL * 1024ULL * 1024ULL):
		return 5;
	}
}

static u64 make_cookie(u64 index, u64 pgsz_code, u64 page_offset)
{
	return ((pgsz_code << COOKIE_PGSZ_CODE_SHIFT) |
		(index << PAGE_SHIFT) |
		page_offset);
}

static u64 cookie_to_index(u64 cookie, unsigned long *shift)
{
	u64 szcode = cookie >> COOKIE_PGSZ_CODE_SHIFT;

	cookie &= ~COOKIE_PGSZ_CODE;

	*shift = szcode * 3;

	return (cookie >> (13ULL + (szcode * 3ULL)));
}

static struct ldc_mtable_entry *alloc_npages(struct ldc_iommu *iommu,
					     unsigned long npages)
{
	long entry;

	entry = arena_alloc(iommu, npages);
	if (unlikely(entry < 0))
		return NULL;

	return iommu->page_table + entry;
}

static u64 perm_to_mte(unsigned int map_perm)
{
	u64 mte_base;

	mte_base = pagesize_code();

	if (map_perm & LDC_MAP_SHADOW) {
		if (map_perm & LDC_MAP_R)
			mte_base |= LDC_MTE_COPY_R;
		if (map_perm & LDC_MAP_W)
			mte_base |= LDC_MTE_COPY_W;
	}
	if (map_perm & LDC_MAP_DIRECT) {
		if (map_perm & LDC_MAP_R)
			mte_base |= LDC_MTE_READ;
		if (map_perm & LDC_MAP_W)
			mte_base |= LDC_MTE_WRITE;
		if (map_perm & LDC_MAP_X)
			mte_base |= LDC_MTE_EXEC;
	}
	if (map_perm & LDC_MAP_IO) {
		if (map_perm & LDC_MAP_R)
			mte_base |= LDC_MTE_IOMMU_R;
		if (map_perm & LDC_MAP_W)
			mte_base |= LDC_MTE_IOMMU_W;
	}

	return mte_base;
}

static int pages_in_region(unsigned long base, long len)
{
	int count = 0;

	do {
		unsigned long new = (base + PAGE_SIZE) & PAGE_MASK;

		len -= (new - base);
		base = new;
		count++;
	} while (len > 0);

	return count;
}

struct cookie_state {
	struct ldc_mtable_entry	*page_table;
	struct ldc_trans_cookie	*cookies;
	u64			mte_base;
	u64			prev_cookie;
	u32			pte_idx;
	u32			nc;
};

/* Fill in one map table entry per page covered by [pa, pa + len) and emit
 * the matching transfer cookies, merging with the previous cookie whenever
 * the new range starts exactly where the last one ended.
 */
static void fill_cookies(struct cookie_state *sp, unsigned long pa,
			 unsigned long off, unsigned long len)
{
	do {
		unsigned long tlen, new = pa + PAGE_SIZE;
		u64 this_cookie;

		sp->page_table[sp->pte_idx].mte = sp->mte_base | pa;

		tlen = PAGE_SIZE;
		if (off)
			tlen = PAGE_SIZE - off;
		if (tlen > len)
			tlen = len;

		this_cookie = make_cookie(sp->pte_idx,
					  pagesize_code(), off);

		off = 0;

		if (this_cookie == sp->prev_cookie) {
			sp->cookies[sp->nc - 1].cookie_size += tlen;
		} else {
			sp->cookies[sp->nc].cookie_addr = this_cookie;
			sp->cookies[sp->nc].cookie_size = tlen;
			sp->nc++;
		}
		sp->prev_cookie = this_cookie + tlen;

		sp->pte_idx++;

		len -= tlen;
		pa = new;
	} while (len > 0);
}

static int sg_count_one(struct scatterlist *sg)
{
	unsigned long base = page_to_pfn(sg_page(sg)) << PAGE_SHIFT;
	long len = sg->length;

	if ((sg->offset | len) & (8UL - 1))
		return -EFAULT;

	return pages_in_region(base + sg->offset, len);
}

static int sg_count_pages(struct scatterlist *sg, int num_sg)
{
	int count;
	int i;

	count = 0;
	for (i = 0; i < num_sg; i++) {
		int err = sg_count_one(sg + i);
		if (err < 0)
			return err;
		count += err;
	}

	return count;
}

int ldc_map_sg(struct ldc_channel *lp,
	       struct scatterlist *sg, int num_sg,
	       struct ldc_trans_cookie *cookies, int ncookies,
	       unsigned int map_perm)
{
	unsigned long i, npages, flags;
	struct ldc_mtable_entry *base;
	struct cookie_state state;
	struct ldc_iommu *iommu;
	int err;

	if (map_perm & ~LDC_MAP_ALL)
		return -EINVAL;

	err = sg_count_pages(sg, num_sg);
	if (err < 0)
		return err;

	npages = err;
	if (err > ncookies)
		return -EMSGSIZE;

	iommu = &lp->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(iommu, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (!base)
		return -ENOMEM;

	state.page_table = iommu->page_table;
	state.cookies = cookies;
	state.mte_base = perm_to_mte(map_perm);
	state.prev_cookie = ~(u64)0;
	state.pte_idx = (base - iommu->page_table);
	state.nc = 0;

	for (i = 0; i < num_sg; i++)
		fill_cookies(&state, page_to_pfn(sg_page(&sg[i])) << PAGE_SHIFT,
			     sg[i].offset, sg[i].length);

	return state.nc;
}
EXPORT_SYMBOL(ldc_map_sg);

int ldc_map_single(struct ldc_channel *lp,
		   void *buf, unsigned int len,
		   struct ldc_trans_cookie *cookies, int ncookies,
		   unsigned int map_perm)
{
	unsigned long npages, pa, flags;
	struct ldc_mtable_entry *base;
	struct cookie_state state;
	struct ldc_iommu *iommu;

	if ((map_perm & ~LDC_MAP_ALL) || (ncookies < 1))
		return -EINVAL;

	pa = __pa(buf);
	if ((pa | len) & (8UL - 1))
		return -EFAULT;

	npages = pages_in_region(pa, len);

	iommu = &lp->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(iommu, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (!base)
		return -ENOMEM;

	state.page_table = iommu->page_table;
	state.cookies = cookies;
	state.mte_base = perm_to_mte(map_perm);
	state.prev_cookie = ~(u64)0;
	state.pte_idx = (base - iommu->page_table);
	state.nc = 0;
	fill_cookies(&state, (pa & PAGE_MASK), (pa & ~PAGE_MASK), len);
	BUG_ON(state.nc != 1);

	return state.nc;
}
EXPORT_SYMBOL(ldc_map_single);

/* Revoke and clear the map table entries backing a single exported cookie,
 * then release the matching bits in the arena bitmap.
 */
static void free_npages(unsigned long id, struct ldc_iommu *iommu,
			u64 cookie, u64 size)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long i, shift, index, npages;
	struct ldc_mtable_entry *base;

	npages = PAGE_ALIGN(((cookie & ~PAGE_MASK) + size)) >> PAGE_SHIFT;
	index = cookie_to_index(cookie, &shift);
	base = iommu->page_table + index;

	BUG_ON(index > arena->limit ||
	       (index + npages) > arena->limit);

	for (i = 0; i < npages; i++) {
		if (base->cookie)
			sun4v_ldc_revoke(id, cookie + (i << shift),
					 base->cookie);
		base->mte = 0;
		__clear_bit(index + i, arena->map);
	}
}

void ldc_unmap(struct ldc_channel *lp, struct ldc_trans_cookie *cookies,
	       int ncookies)
{
	struct ldc_iommu *iommu = &lp->iommu;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&iommu->lock, flags);
	for (i = 0; i < ncookies; i++) {
		u64 addr = cookies[i].cookie_addr;
		u64 size = cookies[i].cookie_size;

		free_npages(lp->id, iommu, addr, size);
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
EXPORT_SYMBOL(ldc_unmap);

int ldc_copy(struct ldc_channel *lp, int copy_dir,
	     void *buf, unsigned int len, unsigned long offset,
	     struct ldc_trans_cookie *cookies, int ncookies)
{
	unsigned int orig_len;
	unsigned long ra;
	int i;

	if (copy_dir != LDC_COPY_IN && copy_dir != LDC_COPY_OUT) {
		printk(KERN_ERR PFX "ldc_copy: ID[%lu] Bad copy_dir[%d]\n",
		       lp->id, copy_dir);
		return -EINVAL;
	}

	ra = __pa(buf);
	if ((ra | len | offset) & (8UL - 1)) {
		printk(KERN_ERR PFX "ldc_copy: ID[%lu] Unaligned buffer "
		       "ra[%lx] len[%x] offset[%lx]\n",
		       lp->id, ra, len, offset);
		return -EFAULT;
	}

	if (lp->hs_state != LDC_HS_COMPLETE ||
	    (lp->flags & LDC_FLAG_RESET)) {
		printk(KERN_ERR PFX "ldc_copy: ID[%lu] Link down hs_state[%x] "
		       "flags[%x]\n", lp->id, lp->hs_state, lp->flags);
		return -ECONNRESET;
	}

	orig_len = len;
	for (i = 0; i < ncookies; i++) {
		unsigned long cookie_raddr = cookies[i].cookie_addr;
		unsigned long this_len = cookies[i].cookie_size;
		unsigned long actual_len;

		if (unlikely(offset)) {
			unsigned long this_off = offset;

			if (this_off > this_len)
				this_off = this_len;

			offset -= this_off;
			this_len -= this_off;
			if (!this_len)
				continue;
			cookie_raddr += this_off;
		}

		if (this_len > len)
			this_len = len;

		while (1) {
			unsigned long hv_err;

			hv_err = sun4v_ldc_copy(lp->id, copy_dir,
						cookie_raddr, ra,
						this_len, &actual_len);
			if (unlikely(hv_err)) {
				printk(KERN_ERR PFX "ldc_copy: ID[%lu] "
				       "HV error %lu\n",
				       lp->id, hv_err);
				if (lp->hs_state != LDC_HS_COMPLETE ||
				    (lp->flags & LDC_FLAG_RESET))
					return -ECONNRESET;
				else
					return -EFAULT;
			}

			cookie_raddr += actual_len;
			ra += actual_len;
			len -= actual_len;
			if (actual_len == this_len)
				break;

			this_len -= actual_len;
		}

		if (!len)
			break;
	}

	/* It is caller policy what to do about short copies.
	 * For example, a networking driver can declare the
	 * packet a runt and drop it.
	 */

	return orig_len - len;
}
EXPORT_SYMBOL(ldc_copy);
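
/* Illustrative sketch only, not part of the original driver: a hypothetical
 * receive helper demonstrating the "caller policy" note above.  It pulls
 * "len" bytes described by the peer's cookies into a local buffer with
 * LDC_COPY_IN and chooses to treat a short copy as a runt, rejecting it
 * with -EMSGSIZE.  The helper name and the error choice are made up for
 * the example.
 */
static int __maybe_unused ldc_example_copy_in(struct ldc_channel *lp,
					      void *buf, unsigned int len,
					      struct ldc_trans_cookie *cookies,
					      int ncookies)
{
	int copied;

	copied = ldc_copy(lp, LDC_COPY_IN, buf, len, 0, cookies, ncookies);
	if (copied < 0)
		return copied;
	if ((unsigned int)copied < len)
		return -EMSGSIZE;	/* runt: this caller drops it */

	return 0;
}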

void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
			  struct ldc_trans_cookie *cookies, int *ncookies,
			  unsigned int map_perm)
{
	void *buf;
	int err;

	if (len & (8UL - 1))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	err = ldc_map_single(lp, buf, len, cookies, *ncookies, map_perm);
	if (err < 0) {
		kfree(buf);
		return ERR_PTR(err);
	}
	*ncookies = err;

	return buf;
}
EXPORT_SYMBOL(ldc_alloc_exp_dring);

void ldc_free_exp_dring(struct ldc_channel *lp, void *buf, unsigned int len,
			struct ldc_trans_cookie *cookies, int ncookies)
{
	ldc_unmap(lp, cookies, ncookies);
	kfree(buf);
}
EXPORT_SYMBOL(ldc_free_exp_dring);
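
/* Illustrative sketch only, not part of the original driver: a hypothetical
 * caller that exports a small descriptor ring to its peer and later tears it
 * down.  The ring size, the two-entry cookie array, and the permission mask
 * (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_R | LDC_MAP_W) are arbitrary
 * choices for the example.
 */
static int __maybe_unused ldc_example_dring_setup(struct ldc_channel *lp)
{
	struct ldc_trans_cookie cookies[2];
	int ncookies = ARRAY_SIZE(cookies);
	unsigned int len = 8 * 64;	/* eight 64-byte descriptors */
	void *dring;

	dring = ldc_alloc_exp_dring(lp, len, cookies, &ncookies,
				    (LDC_MAP_SHADOW | LDC_MAP_DIRECT |
				     LDC_MAP_R | LDC_MAP_W));
	if (IS_ERR(dring))
		return PTR_ERR(dring);

	/* ... publish dring/cookies to the peer and use the ring ... */

	ldc_free_exp_dring(lp, dring, len, cookies, ncookies);
	return 0;
}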

static int __init ldc_init(void)
{
	unsigned long major, minor;
	struct mdesc_handle *hp;
	const u64 *v;
	int err;
	u64 mp;

	hp = mdesc_grab();
	if (!hp)
		return -ENODEV;

	mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
	err = -ENODEV;
	if (mp == MDESC_NODE_NULL)
		goto out;

	v = mdesc_get_property(hp, mp, "domaining-enabled", NULL);
	if (!v)
		goto out;

	major = 1;
	minor = 0;
	if (sun4v_hvapi_register(HV_GRP_LDOM, major, &minor)) {
		printk(KERN_INFO PFX "Could not register LDOM hvapi.\n");
		goto out;
	}

	printk(KERN_INFO "%s", version);

	if (!*v) {
		printk(KERN_INFO PFX "Domaining disabled.\n");
		goto out;
	}
	ldom_domaining_enabled = 1;
	err = 0;

out:
	mdesc_release(hp);
	return err;
}
core_initcall(ldc_init);