sclp.c
/*
 * core function to access sclp interface
 *
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>

#include "sclp.h"

#define SCLP_HEADER		"sclp: "

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static struct list_head sclp_reg_list;

/* List of queued requests. */
static struct list_head sclp_req_queue;

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
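/* Note: both SCCBs above occupy a full, page-aligned page each. The SCLP is
 * handed their real address (see the __pa() conversion in sclp_service_call()),
 * and page alignment keeps each buffer within a single page. */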
/* Suspend request */
static DECLARE_COMPLETION(sclp_request_queue_flushed);

static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
{
	complete(&sclp_request_queue_flushed);
}

static struct sclp_req sclp_suspend_req;

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Internal state: is the driver initialized? */
static volatile enum sclp_init_state_t {
	sclp_init_state_uninitialized,
	sclp_init_state_initializing,
	sclp_init_state_initialized
} sclp_init_state = sclp_init_state_uninitialized;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running,
	sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Internal state: is the driver suspended? */
static enum sclp_suspend_state_t {
	sclp_suspend_state_running,
	sclp_suspend_state_suspended,
} sclp_suspend_state = sclp_suspend_state_running;

/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	30

static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);

/* Perform service call. Return 0 on success, non-zero otherwise. */
int
sclp_service_call(sclp_cmdw_t command, void *sccb)
{
	int cc;

	asm volatile(
		"	.insn	rre,0xb2200000,%1,%2\n"	/* servc %1,%2 */
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=&d" (cc) : "d" (command), "a" (__pa(sccb))
		: "cc", "memory");
	if (cc == 3)
		return -EIO;
	if (cc == 2)
		return -EBUSY;
	return 0;
}
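/* Queue a read event data request at the head of the request queue, unless
 * one is already pending. Called while sclp_lock is locked. */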
static void
__sclp_queue_read_req(void)
{
	if (sclp_reading_state == sclp_reading_state_idle) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
			 unsigned long data)
{
	del_timer(&sclp_request_timer);
	sclp_request_timer.function = function;
	sclp_request_timer.data = data;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}

/* Request timeout handler. Restart the request queue. If DATA is non-zero,
 * force restart of running request. */
static void
sclp_request_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (data) {
		if (sclp_running_state == sclp_running_state_running) {
			/* Break running state and queue NOP read event request
			 * to get a defined interface state. */
			__sclp_queue_read_req();
			sclp_running_state = sclp_running_state_idle;
		}
	} else {
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_process_queue();
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	rc = sclp_service_call(req->command, req->sccb);
	req->start_count++;

	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout, 1);
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}

/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		if (!req->sccb)
			goto do_post;
		rc = __sclp_start_request(req);
		if (rc == 0)
			break;
		/* Request failed */
		if (req->start_count > 1) {
			/* Cannot abort already submitted request - could still
			 * be active at the SCLP */
			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
						 sclp_request_timeout, 0);
			break;
		}
do_post:
		/* Post-processing for aborted request */
		list_del(&req->list);
		if (req->callback) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}
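/* Check whether a request may be queued in the current driver state. The
 * suspend and init requests are always accepted; all other requests require
 * the driver to be initialized, active and not suspended. Called while
 * sclp_lock is locked. */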
static int __sclp_can_add_request(struct sclp_req *req)
{
	if (req == &sclp_suspend_req || req == &sclp_init_req)
		return 1;
	if (sclp_suspend_state != sclp_suspend_state_running)
		return 0;
	if (sclp_init_state != sclp_init_state_initialized)
		return 0;
	if (sclp_activation_state != sclp_activation_state_active)
		return 0;
	return 1;
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if (!__sclp_can_add_request(req)) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}
	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	rc = 0;
	/* Start if request is first in list */
	if (sclp_running_state == sclp_running_state_idle &&
	    req->list.prev == &sclp_req_queue) {
		if (!req->sccb) {
			list_del(&req->list);
			rc = -ENODATA;
			goto out;
		}
		rc = __sclp_start_request(req);
		if (rc)
			list_del(&req->list);
	}
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_add_request);
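/* Usage sketch (illustrative only, not taken from a real caller): an event
 * driver typically owns a page-aligned SCCB and a struct sclp_req, fills in
 * the command word, the SCCB address and a completion callback, and queues
 * the request:
 *
 *	req->command = SCLP_CMDW_WRITE_EVENT_DATA;
 *	req->sccb = my_sccb;
 *	req->status = SCLP_REQ_FILLED;
 *	req->callback = my_callback;
 *	req->callback_data = NULL;
 *	rc = sclp_add_request(req);
 *
 * The callback runs once the request reaches SCLP_REQ_DONE or
 * SCLP_REQ_FAILED; my_sccb and my_callback are placeholders. */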
/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		/* Check for malformed hardware response */
		if (evbuf->length == 0)
			break;
		/* Search for event handler */
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			if (reg->receive_mask & (1 << (32 - evbuf->type)))
				break;
			else
				reg = NULL;
		}
		if (reg && reg->receiver_fn) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			rc = -ENOSYS;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
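/* Note on the handler lookup above: event type N corresponds to bit N of the
 * 32-bit event mask counted from the most-significant bit (type 1 maps to the
 * MSB), hence the 1 << (32 - evbuf->type) test. */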
/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
	    sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
	sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
	sclp_read_req.status = SCLP_REQ_QUEUED;
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;
	sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
	struct list_head *l;
	struct sclp_req *req;

	list_for_each(l, &sclp_req_queue) {
		req = list_entry(l, struct sclp_req, list);
		if (sccb == (u32) (addr_t) req->sccb)
			return req;
	}
	return NULL;
}

/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void sclp_interrupt_handler(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
	spin_lock(&sclp_lock);
	finished_sccb = param32 & 0xfffffff8;
	evbuf_pending = param32 & 0x3;
	if (finished_sccb) {
		del_timer(&sclp_request_timer);
		sclp_running_state = sclp_running_state_reset_pending;
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;
			if (req->callback) {
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		}
		sclp_running_state = sclp_running_state_idle;
	}
	if (evbuf_pending &&
	    sclp_activation_state == sclp_activation_state_active)
		__sclp_queue_read_req();
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}

/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
	return (u64) (jiffies / HZ) << 32;
}
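/* One second is roughly 2^32 TOD clock units (4,096,000,000 units vs.
 * 4,294,967,296), so shifting whole seconds left by 32 bits overestimates the
 * interval by about 5%, which is good enough for a retry timeout. */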
/* Wait until a currently running request has finished. Note: while this
 * function is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
	unsigned long long old_tick;
	unsigned long flags;
	unsigned long cr0, cr0_sync;
	u64 timeout;
	int irq_context;

	/* We'll be disabling timer interrupts, so we need a custom timeout
	 * mechanism */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = get_clock() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	local_irq_save(flags);
	/* Prevent bottom half from executing once we force interrupts open */
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
	old_tick = local_tick_disable();
	trace_hardirqs_on();
	__ctl_store(cr0, 0, 0);
	cr0_sync = cr0;
	cr0_sync &= 0xffff00a0;
	cr0_sync |= 0x00000200;
	__ctl_load(cr0_sync, 0, 0);
	__arch_local_irq_stosm(0x01);
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (timer_pending(&sclp_request_timer) &&
		    get_clock() > timeout &&
		    del_timer(&sclp_request_timer))
			sclp_request_timer.function(sclp_request_timer.data);
		cpu_relax();
	}
	local_irq_disable();
	__ctl_load(cr0, 0, 0);
	if (!irq_context)
		_local_bh_enable();
	local_tick_enable(old_tick);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(sclp_sync_wait);

/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			receive_mask = reg->send_mask & sclp_receive_mask;
			send_mask = reg->receive_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn)
			reg->state_change_fn(reg);
	} while (reg);
}
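/* Layout of a state-change event buffer as delivered by the SCLP. */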
struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8		validity_sclp_active_facility_mask : 1;
	u8		validity_sclp_receive_mask : 1;
	u8		validity_sclp_send_mask : 1;
	u8		validity_read_data_function_mask : 1;
	u16		_zeros : 12;
	u16		mask_length;
	u64		sclp_active_facility_mask;
	sccb_mask_t	sclp_receive_mask;
	sccb_mask_t	sclp_send_mask;
	u32		read_data_function_mask;
} __attribute__((packed));

/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	scbuf = (struct sclp_statechangebuf *) evbuf;
	if (scbuf->mask_length != sizeof(sccb_mask_t))
		return;
	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = scbuf->sclp_receive_mask;
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = scbuf->sclp_send_mask;
	spin_unlock_irqrestore(&sclp_lock, flags);
	if (scbuf->validity_sclp_active_facility_mask)
		sclp_facilities = scbuf->sclp_active_facility_mask;
	sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
	.receive_mask = EVTYP_STATECHANGE_MASK,
	.receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
	struct list_head *l;
	struct sclp_register *t;

	*receive_mask = 0;
	*send_mask = 0;
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		*receive_mask |= t->receive_mask;
		*send_mask |= t->send_mask;
	}
}

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	rc = sclp_init();
	if (rc)
		return rc;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	reg->pm_event_posted = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}

EXPORT_SYMBOL(sclp_register);

/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
		if (evbuf->flags & 0x80) {
			sccb->length -= evbuf->length;
			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
					((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);
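/* SCCB layout for the write event mask (init mask) request. */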
struct init_sccb {
	struct sccb_header header;
	u16 _reserved;
	u16 mask_length;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	sccb_mask_t sclp_receive_mask;
	sccb_mask_t sclp_send_mask;
} __attribute__((packed));

/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
{
	struct init_sccb *sccb;

	sccb = (struct init_sccb *) sclp_init_sccb;
	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(struct init_sccb);
	sccb->mask_length = sizeof(sccb_mask_t);
	sccb->receive_mask = receive_mask;
	sccb->send_mask = send_mask;
	sccb->sclp_receive_mask = 0;
	sccb->sclp_send_mask = 0;
}

/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	/* Determine mask */
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				sclp_receive_mask = sccb->sclp_receive_mask;
				sclp_send_mask = sccb->sclp_send_mask;
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
			break;
		}
	}
	sclp_mask_state = sclp_mask_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Deactivate can only be called when active */
	if (sclp_activation_state != sclp_activation_state_active) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_deactivating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(0);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_inactive;
	else
		sclp_activation_state = sclp_activation_state_active;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_deactivate);

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Reactivate can only be called when inactive */
	if (sclp_activation_state != sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_activating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_active;
	else
		sclp_activation_state = sclp_activation_state_inactive;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_reactivate);

/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void sclp_check_handler(struct ext_code ext_code,
			       unsigned int param32, unsigned long param64)
{
	u32 finished_sccb;

	kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
	finished_sccb = param32 & 0xfffffff8;
	/* Is this the interrupt we are waiting for? */
	if (finished_sccb == 0)
		return;
	if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
		panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
		      finished_sccb);
	spin_lock(&sclp_lock);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_DONE;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_FAILED;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
	struct init_sccb *sccb;
	unsigned long flags;
	int retry;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Prepare init mask command */
	rc = register_external_interrupt(0x2401, sclp_check_handler);
	if (rc) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
		__sclp_make_init_req(0, 0);
		sccb = (struct init_sccb *) sclp_init_req.sccb;
		rc = sclp_service_call(sclp_init_req.command, sccb);
		if (rc == -EIO)
			break;
		sclp_init_req.status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_check_timeout, 0);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* Enable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		service_subclass_irq_register();
		/* Wait for signal from interrupt or timeout */
		sclp_sync_wait();
		/* Disable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		service_subclass_irq_unregister();
		spin_lock_irqsave(&sclp_lock, flags);
		del_timer(&sclp_request_timer);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			rc = 0;
			break;
		} else
			rc = -EBUSY;
	}
	unregister_external_interrupt(0x2401, sclp_check_handler);
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	sclp_deactivate();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event
};

/*
 * Suspend/resume SCLP notifier implementation
 */
static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
{
	struct sclp_register *reg;
	unsigned long flags;

	if (!rollback) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_for_each_entry(reg, &sclp_reg_list, list)
			reg->pm_event_posted = 0;
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	do {
		spin_lock_irqsave(&sclp_lock, flags);
		list_for_each_entry(reg, &sclp_reg_list, list) {
			if (rollback && reg->pm_event_posted)
				goto found;
			if (!rollback && !reg->pm_event_posted)
				goto found;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
found:
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg->pm_event_fn)
			reg->pm_event_fn(reg, sclp_pm_event);
		reg->pm_event_posted = rollback ? 0 : 1;
	} while (1);
}
/*
 * Suspend/resume callbacks for platform device
 */
static int sclp_freeze(struct device *dev)
{
	unsigned long flags;
	int rc;

	sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);

	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_suspended;
	spin_unlock_irqrestore(&sclp_lock, flags);

	/* Init suspend data */
	memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
	sclp_suspend_req.callback = sclp_suspend_req_cb;
	sclp_suspend_req.status = SCLP_REQ_FILLED;
	init_completion(&sclp_request_queue_flushed);

	rc = sclp_add_request(&sclp_suspend_req);
	if (rc == 0)
		wait_for_completion(&sclp_request_queue_flushed);
	else if (rc != -ENODATA)
		goto fail_thaw;

	rc = sclp_deactivate();
	if (rc)
		goto fail_thaw;
	return 0;

fail_thaw:
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_running;
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
	return rc;
}

static int sclp_undo_suspend(enum sclp_pm_event event)
{
	unsigned long flags;
	int rc;

	rc = sclp_reactivate();
	if (rc)
		return rc;

	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_running;
	spin_unlock_irqrestore(&sclp_lock, flags);

	sclp_pm_event(event, 0);
	return 0;
}

static int sclp_thaw(struct device *dev)
{
	return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
}

static int sclp_restore(struct device *dev)
{
	return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
}

static const struct dev_pm_ops sclp_pm_ops = {
	.freeze		= sclp_freeze,
	.thaw		= sclp_thaw,
	.restore	= sclp_restore,
};

static struct platform_driver sclp_pdrv = {
	.driver = {
		.name	= "sclp",
		.owner	= THIS_MODULE,
		.pm	= &sclp_pm_ops,
	},
};
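/* Dummy platform device registered in sclp_initcall(); its purpose is to have
 * the dev_pm_ops above invoked on suspend/resume. */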
static struct platform_device *sclp_pdev;

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check for previous or running initialization */
	if (sclp_init_state != sclp_init_state_uninitialized)
		goto fail_unlock;
	sclp_init_state = sclp_init_state_initializing;
	/* Set up variables */
	INIT_LIST_HEAD(&sclp_req_queue);
	INIT_LIST_HEAD(&sclp_reg_list);
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	init_timer(&sclp_request_timer);
	/* Check interface */
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register reboot handler */
	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register interrupt handler */
	rc = register_external_interrupt(0x2401, sclp_interrupt_handler);
	if (rc)
		goto fail_unregister_reboot_notifier;
	sclp_init_state = sclp_init_state_initialized;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Enable service-signal external interruption - needs to happen with
	 * IRQs enabled. */
	service_subclass_irq_register();
	sclp_init_mask(1);
	return 0;

fail_unregister_reboot_notifier:
	unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
	sclp_init_state = sclp_init_state_uninitialized;
fail_unlock:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/*
 * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able
 * to print the panic message.
 */
static int sclp_panic_notify(struct notifier_block *self,
			     unsigned long event, void *data)
{
	if (sclp_suspend_state == sclp_suspend_state_suspended)
		sclp_undo_suspend(SCLP_PM_EVENT_THAW);
	return NOTIFY_OK;
}

static struct notifier_block sclp_on_panic_nb = {
	.notifier_call = sclp_panic_notify,
	.priority = SCLP_PANIC_PRIO,
};

static __init int sclp_initcall(void)
{
	int rc;

	rc = platform_driver_register(&sclp_pdrv);
	if (rc)
		return rc;

	sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
	rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0;
	if (rc)
		goto fail_platform_driver_unregister;

	rc = atomic_notifier_chain_register(&panic_notifier_list,
					    &sclp_on_panic_nb);
	if (rc)
		goto fail_platform_device_unregister;

	return sclp_init();

fail_platform_device_unregister:
	platform_device_unregister(sclp_pdev);
fail_platform_driver_unregister:
	platform_driver_unregister(&sclp_pdrv);
	return rc;
}

arch_initcall(sclp_initcall);