/*
 * Linux on zSeries Channel Measurement Facility support
 *
 * Copyright IBM Corp. 2000, 2006
 *
 * Authors: Arnd Bergmann <arndb@de.ibm.com>
 *	    Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 * original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/bootmem.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/timex.h>	/* get_tod_clock() */

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/cmb.h>
#include <asm/div64.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "chsc.h"
/*
 * parameter to enable cmf during boot, possible uses are:
 *  "s390cmf" -- enable cmf and allocate 2 MB of ram so measuring can be
 *               used on any subchannel
 *  "s390cmf=<num>" -- enable cmf and allocate enough memory to measure
 *                     <num> subchannel, where <num> is an integer
 *                     between 1 and 65535, default is 1024
 */
#define ARGSTRING "s390cmf"
/*
 * Indices for READCMB: select a single counter from a measurement
 * block via the ->read callback of struct cmb_operations.
 */
enum cmb_index {
	/* basic and extended format: */
	cmb_ssch_rsch_count,
	cmb_sample_count,
	cmb_device_connect_time,
	cmb_function_pending_time,
	cmb_device_disconnect_time,
	cmb_control_unit_queuing_time,
	cmb_device_active_only_time,
	/* extended format only: */
	cmb_device_busy_time,
	cmb_initial_command_response_time,
};
/**
 * enum cmb_format - types of supported measurement block formats
 *
 * @CMF_BASIC:      traditional channel measurement blocks supported
 *                  by all machines that we run on
 * @CMF_EXTENDED:   improved format that was introduced with the z990
 *                  machine
 * @CMF_AUTODETECT: default: use extended format when running on a machine
 *                  supporting extended format, otherwise fall back to
 *                  basic format
 */
enum cmb_format {
	CMF_BASIC,
	CMF_EXTENDED,
	CMF_AUTODETECT = -1,
};
/*
 * format - actual format for all measurement blocks
 *
 * The format module parameter can be set to a value of 0 (zero)
 * or 1, indicating basic or extended format as described for
 * enum cmb_format.  The "bint" parameter type accepts booleans as
 * well as plain integers.
 */
static int format = CMF_AUTODETECT;
module_param(format, bint, 0444);
/**
 * struct cmb_operations - functions to use depending on cmb_format
 *
 * Most of these functions operate on a struct ccw_device. There is only
 * one instance of struct cmb_operations because the format of the measurement
 * data is guaranteed to be the same for every ccw_device.
 *
 * @alloc:   allocate memory for a channel measurement block,
 *           either with the help of a special pool or with kmalloc
 * @free:    free memory allocated with @alloc
 * @set:     enable or disable measurement
 * @read:    read a measurement entry at an index
 * @readall: read a measurement block in a common format
 * @reset:   clear the data in the associated measurement block and
 *           reset its time stamp
 */
struct cmb_operations {
	int  (*alloc)  (struct ccw_device *);
	void (*free)   (struct ccw_device *);
	int  (*set)    (struct ccw_device *, u32);
	u64  (*read)   (struct ccw_device *, int);
	int  (*readall)(struct ccw_device *, struct cmbdata *);
	void (*reset)  (struct ccw_device *);
	/* private: */
	struct attribute_group *attr_group;
};
/* Active callback set; selected according to the detected/configured format. */
static struct cmb_operations *cmbops;

/* Per-device measurement state, reachable via cdev->private->cmb. */
struct cmb_data {
	void *hw_block;		/* Pointer to block updated by hardware */
	void *last_block;	/* Last changed block copied from hardware block */
	int size;		/* Size of hw_block and last_block */
	unsigned long long last_update;  /* when last_block was updated */
};
  125. /*
  126. * Our user interface is designed in terms of nanoseconds,
  127. * while the hardware measures total times in its own
  128. * unit.
  129. */
  130. static inline u64 time_to_nsec(u32 value)
  131. {
  132. return ((u64)value) * 128000ull;
  133. }
  134. /*
  135. * Users are usually interested in average times,
  136. * not accumulated time.
  137. * This also helps us with atomicity problems
  138. * when reading sinlge values.
  139. */
  140. static inline u64 time_to_avg_nsec(u32 value, u32 count)
  141. {
  142. u64 ret;
  143. /* no samples yet, avoid division by 0 */
  144. if (count == 0)
  145. return 0;
  146. /* value comes in units of 128 µsec */
  147. ret = time_to_nsec(value);
  148. do_div(ret, count);
  149. return ret;
  150. }
/* Operation codes for the SCHM instruction. */
#define CMF_OFF 0
#define CMF_ON	2

/*
 * Activate or deactivate the channel monitor. When area is NULL,
 * the monitor is deactivated. The channel monitor needs to
 * be active in order to measure subchannels, which also need
 * to be enabled.
 *
 * SCHM takes its operands in fixed general registers 1 and 2,
 * hence the explicit register variables below.
 */
static inline void cmf_activate(void *area, unsigned int onoff)
{
	register void * __gpr2 asm("2");	/* measurement block origin */
	register long __gpr1 asm("1");		/* operation code */

	__gpr2 = area;
	__gpr1 = onoff;
	/* activate channel measurement */
	asm("schm" : : "d" (__gpr2), "d" (__gpr1) );
}
  168. static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc,
  169. unsigned long address)
  170. {
  171. struct subchannel *sch = to_subchannel(cdev->dev.parent);
  172. int ret;
  173. sch->config.mme = mme;
  174. sch->config.mbfc = mbfc;
  175. /* address can be either a block address or a block index */
  176. if (mbfc)
  177. sch->config.mba = address;
  178. else
  179. sch->config.mbi = address;
  180. ret = cio_commit_config(sch);
  181. if (!mme && ret == -ENODEV) {
  182. /*
  183. * The task was to disable measurement block updates but
  184. * the subchannel is already gone. Report success.
  185. */
  186. ret = 0;
  187. }
  188. return ret;
  189. }
/*
 * State for a pending set_schib() request that must be retried once
 * the subchannel becomes idle; refcounted because both the waiter and
 * the retry path (retry_set_schib) hold references.
 */
struct set_schib_struct {
	u32 mme;		/* measurement mode enable */
	int mbfc;		/* measurement block format control */
	unsigned long address;	/* block address or index */
	wait_queue_head_t wait;	/* waiter sleeps here until ret changes */
	int ret;		/* CMF_PENDING or final result code */
	struct kref kref;
};
  198. static void cmf_set_schib_release(struct kref *kref)
  199. {
  200. struct set_schib_struct *set_data;
  201. set_data = container_of(kref, struct set_schib_struct, kref);
  202. kfree(set_data);
  203. }
/* Sentinel stored in ->ret while a retry is still outstanding. */
#define CMF_PENDING 1

/*
 * Like set_schib(), but if the subchannel is busy, arrange for the
 * request to be retried by the device state machine and sleep until
 * the retry completes or a signal arrives.
 *
 * Returns 0 on success, -ENODEV if no measurement block is set up,
 * -ENOMEM on allocation failure, -EBUSY if the device is not online,
 * or -ERESTARTSYS when interrupted.
 */
static int set_schib_wait(struct ccw_device *cdev, u32 mme,
			  int mbfc, unsigned long address)
{
	struct set_schib_struct *set_data;
	int ret;

	spin_lock_irq(cdev->ccwlock);
	if (!cdev->private->cmb) {
		ret = -ENODEV;
		goto out;
	}
	/* GFP_ATOMIC: allocated under the ccwlock. */
	set_data = kzalloc(sizeof(struct set_schib_struct), GFP_ATOMIC);
	if (!set_data) {
		ret = -ENOMEM;
		goto out;
	}
	init_waitqueue_head(&set_data->wait);
	kref_init(&set_data->kref);
	set_data->mme = mme;
	set_data->mbfc = mbfc;
	set_data->address = address;

	ret = set_schib(cdev, mme, mbfc, address);
	if (ret != -EBUSY)
		goto out_put;

	if (cdev->private->state != DEV_STATE_ONLINE) {
		/* if the device is not online, don't even try again */
		ret = -EBUSY;
		goto out_put;
	}

	/*
	 * Publish the request so retry_set_schib() can find it, and
	 * switch the device state so the state machine knows a cmf
	 * change is pending.
	 */
	cdev->private->state = DEV_STATE_CMFCHANGE;
	set_data->ret = CMF_PENDING;
	cdev->private->cmb_wait = set_data;

	spin_unlock_irq(cdev->ccwlock);
	if (wait_event_interruptible(set_data->wait,
				     set_data->ret != CMF_PENDING)) {
		/* Interrupted by a signal: withdraw the request if the
		 * retry has not happened yet. */
		spin_lock_irq(cdev->ccwlock);
		if (set_data->ret == CMF_PENDING) {
			set_data->ret = -ERESTARTSYS;
			if (cdev->private->state == DEV_STATE_CMFCHANGE)
				cdev->private->state = DEV_STATE_ONLINE;
		}
		spin_unlock_irq(cdev->ccwlock);
	}
	spin_lock_irq(cdev->ccwlock);
	cdev->private->cmb_wait = NULL;
	ret = set_data->ret;
out_put:
	kref_put(&set_data->kref, cmf_set_schib_release);
out:
	spin_unlock_irq(cdev->ccwlock);
	return ret;
}
  256. void retry_set_schib(struct ccw_device *cdev)
  257. {
  258. struct set_schib_struct *set_data;
  259. set_data = cdev->private->cmb_wait;
  260. if (!set_data) {
  261. WARN_ON(1);
  262. return;
  263. }
  264. kref_get(&set_data->kref);
  265. set_data->ret = set_schib(cdev, set_data->mme, set_data->mbfc,
  266. set_data->address);
  267. wake_up(&set_data->wait);
  268. kref_put(&set_data->kref, cmf_set_schib_release);
  269. }
/*
 * Snapshot the hardware-updated measurement block into last_block.
 *
 * Returns 0 on success or when the block is unchanged, -ENODEV if the
 * subchannel is gone, -EBUSY while a start function is in progress,
 * -ENOMEM on allocation failure.  GFP_ATOMIC is used because callers
 * hold cdev->ccwlock.
 */
static int cmf_copy_block(struct ccw_device *cdev)
{
	struct subchannel *sch;
	void *reference_buf;
	void *hw_block;
	struct cmb_data *cmb_data;

	sch = to_subchannel(cdev->dev.parent);

	if (cio_update_schib(sch))
		return -ENODEV;

	if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) {
		/* Don't copy if a start function is in progress. */
		if ((!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_SUSPENDED)) &&
		    (scsw_actl(&sch->schib.scsw) &
		     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) &&
		    (!(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_SEC_STATUS)))
			return -EBUSY;
	}
	cmb_data = cdev->private->cmb;
	hw_block = cmb_data->hw_block;
	if (!memcmp(cmb_data->last_block, hw_block, cmb_data->size))
		/* No need to copy. */
		return 0;
	reference_buf = kzalloc(cmb_data->size, GFP_ATOMIC);
	if (!reference_buf)
		return -ENOMEM;
	/*
	 * Ensure consistency of block copied from hardware: the hardware
	 * may update the block while we read it, so copy twice and retry
	 * until two consecutive copies match.
	 */
	do {
		memcpy(cmb_data->last_block, hw_block, cmb_data->size);
		memcpy(reference_buf, hw_block, cmb_data->size);
	} while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size));
	cmb_data->last_update = get_tod_clock();
	kfree(reference_buf);
	return 0;
}
/*
 * State for a pending cmf_copy_block() request; refcounted like
 * struct set_schib_struct for the same waiter/retry split.
 */
struct copy_block_struct {
	wait_queue_head_t wait;	/* waiter sleeps here until ret changes */
	int ret;		/* CMF_PENDING or final result code */
	struct kref kref;
};
  309. static void cmf_copy_block_release(struct kref *kref)
  310. {
  311. struct copy_block_struct *copy_block;
  312. copy_block = container_of(kref, struct copy_block_struct, kref);
  313. kfree(copy_block);
  314. }
/*
 * Like cmf_copy_block(), but if the subchannel is busy, arrange for the
 * copy to be retried by the device state machine and sleep until it
 * completes or a signal arrives.  Mirrors set_schib_wait().
 *
 * Returns 0 on success, -ENODEV if no measurement block is set up,
 * -ENOMEM on allocation failure, -EBUSY if the device is not online,
 * or -ERESTARTSYS when interrupted.
 */
static int cmf_cmb_copy_wait(struct ccw_device *cdev)
{
	struct copy_block_struct *copy_block;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(cdev->ccwlock, flags);
	if (!cdev->private->cmb) {
		ret = -ENODEV;
		goto out;
	}
	/* GFP_ATOMIC: allocated under the ccwlock. */
	copy_block = kzalloc(sizeof(struct copy_block_struct), GFP_ATOMIC);
	if (!copy_block) {
		ret = -ENOMEM;
		goto out;
	}
	init_waitqueue_head(&copy_block->wait);
	kref_init(&copy_block->kref);

	ret = cmf_copy_block(cdev);
	if (ret != -EBUSY)
		goto out_put;

	if (cdev->private->state != DEV_STATE_ONLINE) {
		ret = -EBUSY;
		goto out_put;
	}

	/*
	 * Publish the request so cmf_retry_copy_block() can find it and
	 * switch the device state so the state machine retries the copy.
	 */
	cdev->private->state = DEV_STATE_CMFUPDATE;
	copy_block->ret = CMF_PENDING;
	cdev->private->cmb_wait = copy_block;

	spin_unlock_irqrestore(cdev->ccwlock, flags);
	if (wait_event_interruptible(copy_block->wait,
				     copy_block->ret != CMF_PENDING)) {
		/* Interrupted by a signal: withdraw the request if the
		 * retry has not happened yet. */
		spin_lock_irqsave(cdev->ccwlock, flags);
		if (copy_block->ret == CMF_PENDING) {
			copy_block->ret = -ERESTARTSYS;
			if (cdev->private->state == DEV_STATE_CMFUPDATE)
				cdev->private->state = DEV_STATE_ONLINE;
		}
		spin_unlock_irqrestore(cdev->ccwlock, flags);
	}
	spin_lock_irqsave(cdev->ccwlock, flags);
	cdev->private->cmb_wait = NULL;
	ret = copy_block->ret;
out_put:
	kref_put(&copy_block->kref, cmf_copy_block_release);
out:
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	return ret;
}
  362. void cmf_retry_copy_block(struct ccw_device *cdev)
  363. {
  364. struct copy_block_struct *copy_block;
  365. copy_block = cdev->private->cmb_wait;
  366. if (!copy_block) {
  367. WARN_ON(1);
  368. return;
  369. }
  370. kref_get(&copy_block->kref);
  371. copy_block->ret = cmf_copy_block(cdev);
  372. wake_up(&copy_block->wait);
  373. kref_put(&copy_block->kref, cmf_copy_block_release);
  374. }
  375. static void cmf_generic_reset(struct ccw_device *cdev)
  376. {
  377. struct cmb_data *cmb_data;
  378. spin_lock_irq(cdev->ccwlock);
  379. cmb_data = cdev->private->cmb;
  380. if (cmb_data) {
  381. memset(cmb_data->last_block, 0, cmb_data->size);
  382. /*
  383. * Need to reset hw block as well to make the hardware start
  384. * from 0 again.
  385. */
  386. memset(cmb_data->hw_block, 0, cmb_data->size);
  387. cmb_data->last_update = 0;
  388. }
  389. cdev->private->cmb_start_time = get_tod_clock();
  390. spin_unlock_irq(cdev->ccwlock);
  391. }
/**
 * struct cmb_area - container for global cmb data
 *
 * @mem:          pointer to CMBs (only in basic measurement mode)
 * @list:         contains a linked list of all subchannels
 * @num_channels: number of channels to be measured
 * @lock:         protect concurrent access to @mem and @list
 */
struct cmb_area {
	struct cmb *mem;
	struct list_head list;
	int num_channels;
	spinlock_t lock;
};

static struct cmb_area cmb_area = {
	.lock = __SPIN_LOCK_UNLOCKED(cmb_area.lock),
	.list = LIST_HEAD_INIT(cmb_area.list),
	.num_channels = 1024,
};
/* ****** old style CMB handling ********/

/*
 * Basic channel measurement blocks are allocated in one contiguous
 * block of memory, which can not be moved as long as any channel
 * is active. Therefore, a maximum number of subchannels needs to
 * be defined somewhere. This is a module parameter, defaulting to
 * a reasonable value of 1024, or 32 kb of memory.
 * Current kernels don't allow kmalloc with more than 128kb, so the
 * maximum is 4096.
 */
module_param_named(maxchannels, cmb_area.num_channels, uint, 0444);
/**
 * struct cmb - basic channel measurement block
 * @ssch_rsch_count:           number of ssch and rsch
 * @sample_count:              number of samples
 * @device_connect_time:       time of device connect
 * @function_pending_time:     time of function pending
 * @device_disconnect_time:    time of device disconnect
 * @control_unit_queuing_time: time of control unit queuing
 * @device_active_only_time:   time of device active only
 * @reserved:                  unused in basic measurement mode
 *
 * The measurement block as used by the hardware. The fields are described
 * further in z/Architecture Principles of Operation, chapter 17.
 *
 * The cmb area made up from these blocks must be a contiguous array and may
 * not be reallocated or freed.
 * Only one cmb area can be present in the system.
 */
struct cmb {
	u16 ssch_rsch_count;
	u16 sample_count;
	u32 device_connect_time;
	u32 function_pending_time;
	u32 device_disconnect_time;
	u32 control_unit_queuing_time;
	u32 device_active_only_time;
	u32 reserved[2];
};
/*
 * Insert a single device into the cmb_area list.
 * Called with cmb_area.lock held from alloc_cmb.
 */
static int alloc_cmb_single(struct ccw_device *cdev,
			    struct cmb_data *cmb_data)
{
	struct cmb *cmb;
	struct ccw_device_private *node;
	int ret;

	spin_lock_irq(cdev->ccwlock);
	if (!list_empty(&cdev->private->cmb_list)) {
		/* The device is already on the list, i.e. measuring. */
		ret = -EBUSY;
		goto out;
	}

	/*
	 * Find first unused cmb in cmb_area.mem.
	 * This is a little tricky: cmb_area.list
	 * remains sorted by ->cmb->hw_data pointers.
	 * Walking list and block array in lockstep, the first gap in the
	 * array is the free slot; if the loop runs to completion, node is
	 * left at the list head and the entry is appended at the tail.
	 */
	cmb = cmb_area.mem;
	list_for_each_entry(node, &cmb_area.list, cmb_list) {
		struct cmb_data *data;

		data = node->cmb;
		if ((struct cmb *)data->hw_block > cmb)
			break;	/* gap found before this node */
		cmb++;
	}
	if (cmb - cmb_area.mem >= cmb_area.num_channels) {
		/* Every preallocated block is in use. */
		ret = -ENOMEM;
		goto out;
	}

	/* insert new cmb */
	list_add_tail(&cdev->private->cmb_list, &node->cmb_list);
	cmb_data->hw_block = cmb;
	cdev->private->cmb = cmb_data;
	ret = 0;
out:
	spin_unlock_irq(cdev->ccwlock);
	return ret;
}
/*
 * The ->alloc callback for basic format: lazily allocate the global
 * contiguous cmb area on first use, then claim one block in it for
 * this device.  Returns 0 on success, -ENOMEM or -EBUSY on failure.
 */
static int alloc_cmb(struct ccw_device *cdev)
{
	int ret;
	struct cmb *mem;
	ssize_t size;
	struct cmb_data *cmb_data;

	/* Allocate private cmb_data. */
	cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL);
	if (!cmb_data)
		return -ENOMEM;

	cmb_data->last_block = kzalloc(sizeof(struct cmb), GFP_KERNEL);
	if (!cmb_data->last_block) {
		kfree(cmb_data);
		return -ENOMEM;
	}
	cmb_data->size = sizeof(struct cmb);

	spin_lock(&cmb_area.lock);
	if (!cmb_area.mem) {
		/* there is no user yet, so we need a new area */
		size = sizeof(struct cmb) * cmb_area.num_channels;
		WARN_ON(!list_empty(&cmb_area.list));

		/*
		 * Drop the lock for the sleeping page allocation and
		 * recheck afterwards in case another thread raced us.
		 */
		spin_unlock(&cmb_area.lock);
		mem = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA,
					       get_order(size));
		spin_lock(&cmb_area.lock);

		if (cmb_area.mem) {
			/* ok, another thread was faster */
			free_pages((unsigned long)mem, get_order(size));
		} else if (!mem) {
			/* no luck */
			ret = -ENOMEM;
			goto out;
		} else {
			/* everything ok */
			memset(mem, 0, size);
			cmb_area.mem = mem;
			cmf_activate(cmb_area.mem, CMF_ON);
		}
	}

	/* do the actual allocation */
	ret = alloc_cmb_single(cdev, cmb_data);
out:
	spin_unlock(&cmb_area.lock);
	if (ret) {
		kfree(cmb_data->last_block);
		kfree(cmb_data);
	}
	return ret;
}
/*
 * The ->free callback for basic format: release this device's block
 * and, if it was the last user, deactivate measurement and free the
 * global cmb area.
 */
static void free_cmb(struct ccw_device *cdev)
{
	struct ccw_device_private *priv;
	struct cmb_data *cmb_data;

	spin_lock(&cmb_area.lock);
	spin_lock_irq(cdev->ccwlock);

	priv = cdev->private;
	cmb_data = priv->cmb;
	priv->cmb = NULL;
	if (cmb_data)
		kfree(cmb_data->last_block);
	kfree(cmb_data);
	list_del_init(&priv->cmb_list);

	if (list_empty(&cmb_area.list)) {
		/* Last user gone: shut down and release the whole area. */
		ssize_t size;

		size = sizeof(struct cmb) * cmb_area.num_channels;
		cmf_activate(NULL, CMF_OFF);
		free_pages((unsigned long)cmb_area.mem, get_order(size));
		cmb_area.mem = NULL;
	}
	spin_unlock_irq(cdev->ccwlock);
	spin_unlock(&cmb_area.lock);
}
  563. static int set_cmb(struct ccw_device *cdev, u32 mme)
  564. {
  565. u16 offset;
  566. struct cmb_data *cmb_data;
  567. unsigned long flags;
  568. spin_lock_irqsave(cdev->ccwlock, flags);
  569. if (!cdev->private->cmb) {
  570. spin_unlock_irqrestore(cdev->ccwlock, flags);
  571. return -EINVAL;
  572. }
  573. cmb_data = cdev->private->cmb;
  574. offset = mme ? (struct cmb *)cmb_data->hw_block - cmb_area.mem : 0;
  575. spin_unlock_irqrestore(cdev->ccwlock, flags);
  576. return set_schib_wait(cdev, mme, 0, offset);
  577. }
  578. static u64 read_cmb(struct ccw_device *cdev, int index)
  579. {
  580. struct cmb *cmb;
  581. u32 val;
  582. int ret;
  583. unsigned long flags;
  584. ret = cmf_cmb_copy_wait(cdev);
  585. if (ret < 0)
  586. return 0;
  587. spin_lock_irqsave(cdev->ccwlock, flags);
  588. if (!cdev->private->cmb) {
  589. ret = 0;
  590. goto out;
  591. }
  592. cmb = ((struct cmb_data *)cdev->private->cmb)->last_block;
  593. switch (index) {
  594. case cmb_ssch_rsch_count:
  595. ret = cmb->ssch_rsch_count;
  596. goto out;
  597. case cmb_sample_count:
  598. ret = cmb->sample_count;
  599. goto out;
  600. case cmb_device_connect_time:
  601. val = cmb->device_connect_time;
  602. break;
  603. case cmb_function_pending_time:
  604. val = cmb->function_pending_time;
  605. break;
  606. case cmb_device_disconnect_time:
  607. val = cmb->device_disconnect_time;
  608. break;
  609. case cmb_control_unit_queuing_time:
  610. val = cmb->control_unit_queuing_time;
  611. break;
  612. case cmb_device_active_only_time:
  613. val = cmb->device_active_only_time;
  614. break;
  615. default:
  616. ret = 0;
  617. goto out;
  618. }
  619. ret = time_to_avg_nsec(val, cmb->sample_count);
  620. out:
  621. spin_unlock_irqrestore(cdev->ccwlock, flags);
  622. return ret;
  623. }
/*
 * The ->readall callback for basic format: copy the whole measurement
 * block into the common struct cmbdata, converting all time fields to
 * nanoseconds.  Returns 0, -ENODEV, or -EAGAIN when no sample has been
 * taken since the last reset.
 */
static int readall_cmb(struct ccw_device *cdev, struct cmbdata *data)
{
	struct cmb *cmb;
	struct cmb_data *cmb_data;
	u64 time;
	unsigned long flags;
	int ret;

	ret = cmf_cmb_copy_wait(cdev);
	if (ret < 0)
		return ret;
	spin_lock_irqsave(cdev->ccwlock, flags);
	cmb_data = cdev->private->cmb;
	if (!cmb_data) {
		ret = -ENODEV;
		goto out;
	}
	if (cmb_data->last_update == 0) {
		/* No snapshot taken yet. */
		ret = -EAGAIN;
		goto out;
	}
	cmb = cmb_data->last_block;
	time = cmb_data->last_update - cdev->private->cmb_start_time;

	memset(data, 0, sizeof(struct cmbdata));

	/* we only know values before device_busy_time */
	data->size = offsetof(struct cmbdata, device_busy_time);

	/* convert to nanoseconds */
	data->elapsed_time = (time * 1000) >> 12;

	/* copy data to new structure */
	data->ssch_rsch_count = cmb->ssch_rsch_count;
	data->sample_count = cmb->sample_count;

	/* time fields are converted to nanoseconds while copying */
	data->device_connect_time = time_to_nsec(cmb->device_connect_time);
	data->function_pending_time = time_to_nsec(cmb->function_pending_time);
	data->device_disconnect_time =
		time_to_nsec(cmb->device_disconnect_time);
	data->control_unit_queuing_time
		= time_to_nsec(cmb->control_unit_queuing_time);
	data->device_active_only_time
		= time_to_nsec(cmb->device_active_only_time);
	ret = 0;
out:
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	return ret;
}
  668. static void reset_cmb(struct ccw_device *cdev)
  669. {
  670. cmf_generic_reset(cdev);
  671. }
  672. static int cmf_enabled(struct ccw_device *cdev)
  673. {
  674. int enabled;
  675. spin_lock_irq(cdev->ccwlock);
  676. enabled = !!cdev->private->cmb;
  677. spin_unlock_irq(cdev->ccwlock);
  678. return enabled;
  679. }
/* Forward declaration; the attribute group is defined further down. */
static struct attribute_group cmf_attr_group;

/* Callback set used when measuring in basic (old style) format. */
static struct cmb_operations cmbops_basic = {
	.alloc	= alloc_cmb,
	.free	= free_cmb,
	.set	= set_cmb,
	.read	= read_cmb,
	.readall    = readall_cmb,
	.reset	    = reset_cmb,
	.attr_group = &cmf_attr_group,
};
/* ******** extended cmb handling ********/

/**
 * struct cmbe - extended channel measurement block
 * @ssch_rsch_count:               number of ssch and rsch
 * @sample_count:                  number of samples
 * @device_connect_time:           time of device connect
 * @function_pending_time:         time of function pending
 * @device_disconnect_time:        time of device disconnect
 * @control_unit_queuing_time:     time of control unit queuing
 * @device_active_only_time:       time of device active only
 * @device_busy_time:              time of device busy
 * @initial_command_response_time: initial command response time
 * @reserved:                      unused
 *
 * The measurement block as used by the hardware. May be in any 64 bit physical
 * location.
 * The fields are described further in z/Architecture Principles of Operation,
 * third edition, chapter 17.
 */
struct cmbe {
	u32 ssch_rsch_count;
	u32 sample_count;
	u32 device_connect_time;
	u32 function_pending_time;
	u32 device_disconnect_time;
	u32 control_unit_queuing_time;
	u32 device_active_only_time;
	u32 device_busy_time;
	u32 initial_command_response_time;
	u32 reserved[7];
} __packed __aligned(64);

/* Slab cache providing the 64-byte alignment required by the hardware. */
static struct kmem_cache *cmbe_cache;
/*
 * The ->alloc callback for extended format: each device gets its own
 * independently allocated hardware block from cmbe_cache.  Returns 0
 * on success, -ENOMEM on allocation failure, -EBUSY when measurement
 * is already enabled for the device.
 */
static int alloc_cmbe(struct ccw_device *cdev)
{
	struct cmb_data *cmb_data;
	struct cmbe *cmbe;
	int ret = -ENOMEM;

	cmbe = kmem_cache_zalloc(cmbe_cache, GFP_KERNEL);
	if (!cmbe)
		return ret;

	cmb_data = kzalloc(sizeof(*cmb_data), GFP_KERNEL);
	if (!cmb_data)
		goto out_free;

	cmb_data->last_block = kzalloc(sizeof(struct cmbe), GFP_KERNEL);
	if (!cmb_data->last_block)
		goto out_free;

	cmb_data->size = sizeof(*cmbe);
	cmb_data->hw_block = cmbe;

	spin_lock(&cmb_area.lock);
	spin_lock_irq(cdev->ccwlock);
	if (cdev->private->cmb)
		goto out_unlock;	/* already measuring */

	cdev->private->cmb = cmb_data;

	/* activate global measurement if this is the first channel */
	if (list_empty(&cmb_area.list))
		cmf_activate(NULL, CMF_ON);
	list_add_tail(&cdev->private->cmb_list, &cmb_area.list);

	spin_unlock_irq(cdev->ccwlock);
	spin_unlock(&cmb_area.lock);
	return 0;

out_unlock:
	spin_unlock_irq(cdev->ccwlock);
	spin_unlock(&cmb_area.lock);
	ret = -EBUSY;
out_free:
	if (cmb_data)
		kfree(cmb_data->last_block);
	kfree(cmb_data);
	kmem_cache_free(cmbe_cache, cmbe);
	return ret;
}
/*
 * The ->free callback for extended format: release this device's
 * blocks and deactivate global measurement when the last channel
 * stops measuring.
 */
static void free_cmbe(struct ccw_device *cdev)
{
	struct cmb_data *cmb_data;

	spin_lock(&cmb_area.lock);
	spin_lock_irq(cdev->ccwlock);
	cmb_data = cdev->private->cmb;
	cdev->private->cmb = NULL;
	if (cmb_data) {
		kfree(cmb_data->last_block);
		kmem_cache_free(cmbe_cache, cmb_data->hw_block);
	}
	kfree(cmb_data);

	/* deactivate global measurement if this is the last channel */
	list_del_init(&cdev->private->cmb_list);
	if (list_empty(&cmb_area.list))
		cmf_activate(NULL, CMF_OFF);
	spin_unlock_irq(cdev->ccwlock);
	spin_unlock(&cmb_area.lock);
}
  780. static int set_cmbe(struct ccw_device *cdev, u32 mme)
  781. {
  782. unsigned long mba;
  783. struct cmb_data *cmb_data;
  784. unsigned long flags;
  785. spin_lock_irqsave(cdev->ccwlock, flags);
  786. if (!cdev->private->cmb) {
  787. spin_unlock_irqrestore(cdev->ccwlock, flags);
  788. return -EINVAL;
  789. }
  790. cmb_data = cdev->private->cmb;
  791. mba = mme ? (unsigned long) cmb_data->hw_block : 0;
  792. spin_unlock_irqrestore(cdev->ccwlock, flags);
  793. return set_schib_wait(cdev, mme, 1, mba);
  794. }
  795. static u64 read_cmbe(struct ccw_device *cdev, int index)
  796. {
  797. struct cmbe *cmb;
  798. struct cmb_data *cmb_data;
  799. u32 val;
  800. int ret;
  801. unsigned long flags;
  802. ret = cmf_cmb_copy_wait(cdev);
  803. if (ret < 0)
  804. return 0;
  805. spin_lock_irqsave(cdev->ccwlock, flags);
  806. cmb_data = cdev->private->cmb;
  807. if (!cmb_data) {
  808. ret = 0;
  809. goto out;
  810. }
  811. cmb = cmb_data->last_block;
  812. switch (index) {
  813. case cmb_ssch_rsch_count:
  814. ret = cmb->ssch_rsch_count;
  815. goto out;
  816. case cmb_sample_count:
  817. ret = cmb->sample_count;
  818. goto out;
  819. case cmb_device_connect_time:
  820. val = cmb->device_connect_time;
  821. break;
  822. case cmb_function_pending_time:
  823. val = cmb->function_pending_time;
  824. break;
  825. case cmb_device_disconnect_time:
  826. val = cmb->device_disconnect_time;
  827. break;
  828. case cmb_control_unit_queuing_time:
  829. val = cmb->control_unit_queuing_time;
  830. break;
  831. case cmb_device_active_only_time:
  832. val = cmb->device_active_only_time;
  833. break;
  834. case cmb_device_busy_time:
  835. val = cmb->device_busy_time;
  836. break;
  837. case cmb_initial_command_response_time:
  838. val = cmb->initial_command_response_time;
  839. break;
  840. default:
  841. ret = 0;
  842. goto out;
  843. }
  844. ret = time_to_avg_nsec(val, cmb->sample_count);
  845. out:
  846. spin_unlock_irqrestore(cdev->ccwlock, flags);
  847. return ret;
  848. }
/*
 * Copy all counters of the extended-format measurement block of @cdev
 * into @data, converting raw time values to nanoseconds.
 *
 * Returns 0 on success, -ENODEV if measurement is not active for the
 * device, -EAGAIN if no block has been copied from hardware yet, or a
 * negative error from cmf_cmb_copy_wait().
 */
static int readall_cmbe(struct ccw_device *cdev, struct cmbdata *data)
{
	struct cmbe *cmb;
	struct cmb_data *cmb_data;
	u64 time;
	unsigned long flags;
	int ret;

	/* Wait for a current copy of the hardware block. */
	ret = cmf_cmb_copy_wait(cdev);
	if (ret < 0)
		return ret;
	spin_lock_irqsave(cdev->ccwlock, flags);
	cmb_data = cdev->private->cmb;
	if (!cmb_data) {
		ret = -ENODEV;
		goto out;
	}
	if (cmb_data->last_update == 0) {
		/* No sample has been copied from hardware yet. */
		ret = -EAGAIN;
		goto out;
	}
	time = cmb_data->last_update - cdev->private->cmb_start_time;

	memset (data, 0, sizeof(struct cmbdata));

	/* we only know values before device_busy_time */
	data->size = offsetof(struct cmbdata, device_busy_time);

	/* convert to nanoseconds */
	data->elapsed_time = (time * 1000) >> 12;

	cmb = cmb_data->last_block;
	/* copy data to new structure */
	data->ssch_rsch_count = cmb->ssch_rsch_count;
	data->sample_count = cmb->sample_count;

	/* time fields are converted to nanoseconds while copying */
	data->device_connect_time = time_to_nsec(cmb->device_connect_time);
	data->function_pending_time = time_to_nsec(cmb->function_pending_time);
	data->device_disconnect_time =
		time_to_nsec(cmb->device_disconnect_time);
	data->control_unit_queuing_time
		= time_to_nsec(cmb->control_unit_queuing_time);
	data->device_active_only_time
		= time_to_nsec(cmb->device_active_only_time);
	data->device_busy_time = time_to_nsec(cmb->device_busy_time);
	data->initial_command_response_time
		= time_to_nsec(cmb->initial_command_response_time);

	ret = 0;
out:
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	return ret;
}
/* Reset the measurement data of @cdev; the extended format shares the
 * generic reset logic. */
static void reset_cmbe(struct ccw_device *cdev)
{
	cmf_generic_reset(cdev);
}
/* Forward declaration; the group is defined after its attribute list. */
static struct attribute_group cmf_attr_group_ext;

/* Operations used when the extended measurement-block format
 * (CMF_EXTENDED) is selected in init_cmf(). */
static struct cmb_operations cmbops_extended = {
	.alloc = alloc_cmbe,
	.free = free_cmbe,
	.set = set_cmbe,
	.read = read_cmbe,
	.readall = readall_cmbe,
	.reset = reset_cmbe,
	.attr_group = &cmf_attr_group_ext,
};
  910. static ssize_t cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx)
  911. {
  912. return sprintf(buf, "%lld\n",
  913. (unsigned long long) cmf_read(to_ccwdev(dev), idx));
  914. }
  915. static ssize_t cmb_show_avg_sample_interval(struct device *dev,
  916. struct device_attribute *attr,
  917. char *buf)
  918. {
  919. struct ccw_device *cdev;
  920. long interval;
  921. unsigned long count;
  922. struct cmb_data *cmb_data;
  923. cdev = to_ccwdev(dev);
  924. count = cmf_read(cdev, cmb_sample_count);
  925. spin_lock_irq(cdev->ccwlock);
  926. cmb_data = cdev->private->cmb;
  927. if (count) {
  928. interval = cmb_data->last_update -
  929. cdev->private->cmb_start_time;
  930. interval = (interval * 1000) >> 12;
  931. interval /= count;
  932. } else
  933. interval = -1;
  934. spin_unlock_irq(cdev->ccwlock);
  935. return sprintf(buf, "%ld\n", interval);
  936. }
  937. static ssize_t cmb_show_avg_utilization(struct device *dev,
  938. struct device_attribute *attr,
  939. char *buf)
  940. {
  941. struct cmbdata data;
  942. u64 utilization;
  943. unsigned long t, u;
  944. int ret;
  945. ret = cmf_readall(to_ccwdev(dev), &data);
  946. if (ret == -EAGAIN || ret == -ENODEV)
  947. /* No data (yet/currently) available to use for calculation. */
  948. return sprintf(buf, "n/a\n");
  949. else if (ret)
  950. return ret;
  951. utilization = data.device_connect_time +
  952. data.function_pending_time +
  953. data.device_disconnect_time;
  954. /* shift to avoid long long division */
  955. while (-1ul < (data.elapsed_time | utilization)) {
  956. utilization >>= 8;
  957. data.elapsed_time >>= 8;
  958. }
  959. /* calculate value in 0.1 percent units */
  960. t = (unsigned long) data.elapsed_time / 1000;
  961. u = (unsigned long) utilization / t;
  962. return sprintf(buf, "%02ld.%01ld%%\n", u/ 10, u - (u/ 10) * 10);
  963. }
/*
 * Generate a sysfs show function plus a read-only DEVICE_ATTR for one
 * counter, read via cmb_show_attr() -> cmf_read().
 */
#define cmf_attr(name) \
static ssize_t show_##name(struct device *dev, \
			   struct device_attribute *attr, char *buf) \
{ return cmb_show_attr((dev), buf, cmb_##name); } \
static DEVICE_ATTR(name, 0444, show_##name, NULL);

/*
 * Same as cmf_attr(), but the attribute is named avg_<name>; the
 * per-sample averaging itself happens in the read accessors (see
 * time_to_avg_nsec() usage in read_cmbe()).
 */
#define cmf_attr_avg(name) \
static ssize_t show_avg_##name(struct device *dev, \
			       struct device_attribute *attr, char *buf) \
{ return cmb_show_attr((dev), buf, cmb_##name); } \
static DEVICE_ATTR(avg_##name, 0444, show_avg_##name, NULL);

/* One attribute per measurement-block counter. */
cmf_attr(ssch_rsch_count);
cmf_attr(sample_count);
cmf_attr_avg(device_connect_time);
cmf_attr_avg(function_pending_time);
cmf_attr_avg(device_disconnect_time);
cmf_attr_avg(control_unit_queuing_time);
cmf_attr_avg(device_active_only_time);
cmf_attr_avg(device_busy_time);
cmf_attr_avg(initial_command_response_time);

/* Derived attributes with dedicated show functions. */
static DEVICE_ATTR(avg_sample_interval, 0444, cmb_show_avg_sample_interval,
		   NULL);
static DEVICE_ATTR(avg_utilization, 0444, cmb_show_avg_utilization, NULL);
/*
 * Attribute set for the basic measurement-block format; it lacks
 * avg_device_busy_time and avg_initial_command_response_time, which are
 * only present in the extended set (cmf_attributes_ext).
 */
static struct attribute *cmf_attributes[] = {
	&dev_attr_avg_sample_interval.attr,
	&dev_attr_avg_utilization.attr,
	&dev_attr_ssch_rsch_count.attr,
	&dev_attr_sample_count.attr,
	&dev_attr_avg_device_connect_time.attr,
	&dev_attr_avg_function_pending_time.attr,
	&dev_attr_avg_device_disconnect_time.attr,
	&dev_attr_avg_control_unit_queuing_time.attr,
	&dev_attr_avg_device_active_only_time.attr,
	NULL,
};

/* Exposed under <device>/cmf/ in sysfs. */
static struct attribute_group cmf_attr_group = {
	.name = "cmf",
	.attrs = cmf_attributes,
};
/*
 * Attribute set for the extended measurement-block format: the basic
 * set plus device_busy_time and initial_command_response_time.
 */
static struct attribute *cmf_attributes_ext[] = {
	&dev_attr_avg_sample_interval.attr,
	&dev_attr_avg_utilization.attr,
	&dev_attr_ssch_rsch_count.attr,
	&dev_attr_sample_count.attr,
	&dev_attr_avg_device_connect_time.attr,
	&dev_attr_avg_function_pending_time.attr,
	&dev_attr_avg_device_disconnect_time.attr,
	&dev_attr_avg_control_unit_queuing_time.attr,
	&dev_attr_avg_device_active_only_time.attr,
	&dev_attr_avg_device_busy_time.attr,
	&dev_attr_avg_initial_command_response_time.attr,
	NULL,
};

/* Exposed under <device>/cmf/ in sysfs (same group name as basic). */
static struct attribute_group cmf_attr_group_ext = {
	.name = "cmf",
	.attrs = cmf_attributes_ext,
};
  1020. static ssize_t cmb_enable_show(struct device *dev,
  1021. struct device_attribute *attr,
  1022. char *buf)
  1023. {
  1024. struct ccw_device *cdev = to_ccwdev(dev);
  1025. return sprintf(buf, "%d\n", cmf_enabled(cdev));
  1026. }
  1027. static ssize_t cmb_enable_store(struct device *dev,
  1028. struct device_attribute *attr, const char *buf,
  1029. size_t c)
  1030. {
  1031. struct ccw_device *cdev = to_ccwdev(dev);
  1032. unsigned long val;
  1033. int ret;
  1034. ret = kstrtoul(buf, 16, &val);
  1035. if (ret)
  1036. return ret;
  1037. switch (val) {
  1038. case 0:
  1039. ret = disable_cmf(cdev);
  1040. break;
  1041. case 1:
  1042. ret = enable_cmf(cdev);
  1043. break;
  1044. default:
  1045. ret = -EINVAL;
  1046. }
  1047. return ret ? ret : c;
  1048. }
/* Sysfs attribute "cmb_enable", backed by cmb_enable_show/_store. */
DEVICE_ATTR_RW(cmb_enable);

/* Switch measurement-block updates on (@enable nonzero, mme 2) or off
 * (mme 0) without touching allocation or sysfs; thin wrapper around
 * cmbops->set(). */
int ccw_set_cmf(struct ccw_device *cdev, int enable)
{
	return cmbops->set(cdev, enable ? 2 : 0);
}
/**
 * enable_cmf() - switch on the channel measurement for a specific device
 * @cdev: The ccw device to be enabled
 *
 * Returns %0 for success or a negative error value.
 * Note: If this is called on a device for which channel measurement is already
 * enabled a reset of the measurement data is triggered.
 * Context:
 * non-atomic
 */
int enable_cmf(struct ccw_device *cdev)
{
	int ret = 0;

	device_lock(&cdev->dev);
	if (cmf_enabled(cdev)) {
		/* Already enabled: just reset the measurement data. */
		cmbops->reset(cdev);
		goto out_unlock;
	}
	/* Hold a device reference while measurement is active; it is
	 * dropped in __disable_cmf() or on the failure paths below. */
	get_device(&cdev->dev);
	ret = cmbops->alloc(cdev);
	if (ret)
		goto out;
	cmbops->reset(cdev);
	ret = sysfs_create_group(&cdev->dev.kobj, cmbops->attr_group);
	if (ret) {
		cmbops->free(cdev);
		goto out;
	}
	/* Nonzero mme requests measurement updates (cf. set_cmbe()). */
	ret = cmbops->set(cdev, 2);
	if (ret) {
		/* Undo sysfs group and allocation on failure. */
		sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
		cmbops->free(cdev);
	}
out:
	if (ret)
		put_device(&cdev->dev);
out_unlock:
	device_unlock(&cdev->dev);
	return ret;
}
  1094. /**
  1095. * __disable_cmf() - switch off the channel measurement for a specific device
  1096. * @cdev: The ccw device to be disabled
  1097. *
  1098. * Returns %0 for success or a negative error value.
  1099. *
  1100. * Context:
  1101. * non-atomic, device_lock() held.
  1102. */
  1103. int __disable_cmf(struct ccw_device *cdev)
  1104. {
  1105. int ret;
  1106. ret = cmbops->set(cdev, 0);
  1107. if (ret)
  1108. return ret;
  1109. sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
  1110. cmbops->free(cdev);
  1111. put_device(&cdev->dev);
  1112. return ret;
  1113. }
  1114. /**
  1115. * disable_cmf() - switch off the channel measurement for a specific device
  1116. * @cdev: The ccw device to be disabled
  1117. *
  1118. * Returns %0 for success or a negative error value.
  1119. *
  1120. * Context:
  1121. * non-atomic
  1122. */
  1123. int disable_cmf(struct ccw_device *cdev)
  1124. {
  1125. int ret;
  1126. device_lock(&cdev->dev);
  1127. ret = __disable_cmf(cdev);
  1128. device_unlock(&cdev->dev);
  1129. return ret;
  1130. }
/**
 * cmf_read() - read one value from the current channel measurement block
 * @cdev: the channel to be read
 * @index: the index of the value to be read (enum cmb_index)
 *
 * Returns the value read or %0 if the value cannot be read.
 *
 * Context:
 * any
 */
u64 cmf_read(struct ccw_device *cdev, int index)
{
	/* Dispatch through the format-specific operations. */
	return cmbops->read(cdev, index);
}
/**
 * cmf_readall() - read the current channel measurement block
 * @cdev: the channel to be read
 * @data: a pointer to a data block that will be filled
 *
 * Returns %0 on success, a negative error value otherwise.
 *
 * Context:
 * any
 */
int cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
{
	/* Dispatch through the format-specific operations. */
	return cmbops->readall(cdev, data);
}
/* Reenable cmf when a disconnected device becomes available again. */
int cmf_reenable(struct ccw_device *cdev)
{
	/* Discard stale data, then request measurement updates again. */
	cmbops->reset(cdev);
	return cmbops->set(cdev, 2);
}
/**
 * cmf_reactivate() - reactivate measurement block updates
 *
 * Use this during resume from hibernate.
 */
void cmf_reactivate(void)
{
	spin_lock(&cmb_area.lock);
	/* Only reactivate if at least one channel is still measured. */
	if (!list_empty(&cmb_area.list))
		cmf_activate(cmb_area.mem, CMF_ON);
	spin_unlock(&cmb_area.lock);
}
  1177. static int __init init_cmbe(void)
  1178. {
  1179. cmbe_cache = kmem_cache_create("cmbe_cache", sizeof(struct cmbe),
  1180. __alignof__(struct cmbe), 0, NULL);
  1181. return cmbe_cache ? 0 : -ENOMEM;
  1182. }
  1183. static int __init init_cmf(void)
  1184. {
  1185. char *format_string;
  1186. char *detect_string;
  1187. int ret;
  1188. /*
  1189. * If the user did not give a parameter, see if we are running on a
  1190. * machine supporting extended measurement blocks, otherwise fall back
  1191. * to basic mode.
  1192. */
  1193. if (format == CMF_AUTODETECT) {
  1194. if (!css_general_characteristics.ext_mb) {
  1195. format = CMF_BASIC;
  1196. } else {
  1197. format = CMF_EXTENDED;
  1198. }
  1199. detect_string = "autodetected";
  1200. } else {
  1201. detect_string = "parameter";
  1202. }
  1203. switch (format) {
  1204. case CMF_BASIC:
  1205. format_string = "basic";
  1206. cmbops = &cmbops_basic;
  1207. break;
  1208. case CMF_EXTENDED:
  1209. format_string = "extended";
  1210. cmbops = &cmbops_extended;
  1211. ret = init_cmbe();
  1212. if (ret)
  1213. return ret;
  1214. break;
  1215. default:
  1216. return -EINVAL;
  1217. }
  1218. pr_info("Channel measurement facility initialized using format "
  1219. "%s (mode %s)\n", format_string, detect_string);
  1220. return 0;
  1221. }
/* Module metadata and the exported interface of the CMF base driver. */
module_init(init_cmf);

MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("channel measurement facility base driver\n"
		   "Copyright IBM Corp. 2003\n");

EXPORT_SYMBOL_GPL(enable_cmf);
EXPORT_SYMBOL_GPL(disable_cmf);
EXPORT_SYMBOL_GPL(cmf_read);
EXPORT_SYMBOL_GPL(cmf_readall);