cmf.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350
  1. /*
  2. * linux/drivers/s390/cio/cmf.c
  3. *
  4. * Linux on zSeries Channel Measurement Facility support
  5. *
  6. * Copyright 2000,2006 IBM Corporation
  7. *
  8. * Authors: Arnd Bergmann <arndb@de.ibm.com>
  9. * Cornelia Huck <cornelia.huck@de.ibm.com>
  10. *
  11. * original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com>
  12. *
  13. * This program is free software; you can redistribute it and/or modify
  14. * it under the terms of the GNU General Public License as published by
  15. * the Free Software Foundation; either version 2, or (at your option)
  16. * any later version.
  17. *
  18. * This program is distributed in the hope that it will be useful,
  19. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  20. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  21. * GNU General Public License for more details.
  22. *
  23. * You should have received a copy of the GNU General Public License
  24. * along with this program; if not, write to the Free Software
  25. * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  26. */
  27. #define KMSG_COMPONENT "cio"
  28. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  29. #include <linux/bootmem.h>
  30. #include <linux/device.h>
  31. #include <linux/init.h>
  32. #include <linux/list.h>
  33. #include <linux/module.h>
  34. #include <linux/moduleparam.h>
  35. #include <linux/slab.h>
  36. #include <linux/timex.h> /* get_clock() */
  37. #include <asm/ccwdev.h>
  38. #include <asm/cio.h>
  39. #include <asm/cmb.h>
  40. #include <asm/div64.h>
  41. #include "cio.h"
  42. #include "css.h"
  43. #include "device.h"
  44. #include "ioasm.h"
  45. #include "chsc.h"
/*
 * parameter to enable cmf during boot, possible uses are:
 *  "s390cmf" -- enable cmf and allocate 2 MB of ram so measuring can be
 *               used on any subchannel
 *  "s390cmf=<num>" -- enable cmf and allocate enough memory to measure
 *                     <num> subchannel, where <num> is an integer
 *                     between 1 and 65535, default is 1024
 */
#define ARGSTRING "s390cmf"
/* indices for READCMB; order matches the field order of struct cmb/cmbe */
enum cmb_index {
	/* basic and extended format: */
	cmb_ssch_rsch_count,
	cmb_sample_count,
	cmb_device_connect_time,
	cmb_function_pending_time,
	cmb_device_disconnect_time,
	cmb_control_unit_queuing_time,
	cmb_device_active_only_time,
	/* extended format only: */
	cmb_device_busy_time,
	cmb_initial_command_response_time,
};
/**
 * enum cmb_format - types of supported measurement block formats
 *
 * @CMF_BASIC:      traditional channel measurement blocks supported
 *                  by all machines that we run on
 * @CMF_EXTENDED:   improved format that was introduced with the z990
 *                  machine
 * @CMF_AUTODETECT: default: use extended format when running on a machine
 *                  supporting extended format, otherwise fall back to
 *                  basic format
 */
enum cmb_format {
	CMF_BASIC,
	CMF_EXTENDED,
	CMF_AUTODETECT = -1,
};
/*
 * format - actual format for all measurement blocks
 *
 * The format module parameter can be set to a value of 0 (zero)
 * or 1, indicating basic or extended format as described for
 * enum cmb_format.  Left at CMF_AUTODETECT unless overridden on the
 * kernel command line / module load (read-only via sysfs, mode 0444).
 */
static int format = CMF_AUTODETECT;
module_param(format, bint, 0444);
/**
 * struct cmb_operations - functions to use depending on cmb_format
 *
 * Most of these functions operate on a struct ccw_device. There is only
 * one instance of struct cmb_operations because the format of the measurement
 * data is guaranteed to be the same for every ccw_device.
 *
 * @alloc:	allocate memory for a channel measurement block,
 *		either with the help of a special pool or with kmalloc
 * @free:	free memory allocated with @alloc
 * @set:	enable or disable measurement
 * @read:	read a measurement entry at an index
 * @readall:	read a measurement block in a common format
 * @reset:	clear the data in the associated measurement block and
 *		reset its time stamp
 * @align:	align an allocated block so that the hardware can use it
 */
struct cmb_operations {
	int  (*alloc)  (struct ccw_device *);
	void (*free)   (struct ccw_device *);
	int  (*set)    (struct ccw_device *, u32);
	u64  (*read)   (struct ccw_device *, int);
	int  (*readall)(struct ccw_device *, struct cmbdata *);
	void (*reset)  (struct ccw_device *);
	void *(*align) (void *);
/* private: */
	struct attribute_group *attr_group;
};
/* active operations vector; chosen once at init time based on "format" */
static struct cmb_operations *cmbops;

/* per-device measurement state, hung off cdev->private->cmb */
struct cmb_data {
	void *hw_block;   /* Pointer to block updated by hardware */
	void *last_block; /* Last changed block copied from hardware block */
	int size;         /* Size of hw_block and last_block */
	unsigned long long last_update;  /* when last_block was updated */
};
  129. /*
  130. * Our user interface is designed in terms of nanoseconds,
  131. * while the hardware measures total times in its own
  132. * unit.
  133. */
  134. static inline u64 time_to_nsec(u32 value)
  135. {
  136. return ((u64)value) * 128000ull;
  137. }
  138. /*
  139. * Users are usually interested in average times,
  140. * not accumulated time.
  141. * This also helps us with atomicity problems
  142. * when reading sinlge values.
  143. */
  144. static inline u64 time_to_avg_nsec(u32 value, u32 count)
  145. {
  146. u64 ret;
  147. /* no samples yet, avoid division by 0 */
  148. if (count == 0)
  149. return 0;
  150. /* value comes in units of 128 µsec */
  151. ret = time_to_nsec(value);
  152. do_div(ret, count);
  153. return ret;
  154. }
/*
 * Activate or deactivate the channel monitor. When area is NULL,
 * the monitor is deactivated. The channel monitor needs to
 * be active in order to measure subchannels, which also need
 * to be enabled.
 */
static inline void cmf_activate(void *area, unsigned int onoff)
{
	/* SCHM takes its operands in general registers 1 and 2 */
	register void * __gpr2 asm("2");	/* measurement area origin */
	register long __gpr1 asm("1");		/* control: 2 = on, 0 = off */
	__gpr2 = area;
	__gpr1 = onoff ? 2 : 0;
	/* activate channel measurement */
	asm("schm" : : "d" (__gpr2), "d" (__gpr1) );
}
  170. static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc,
  171. unsigned long address)
  172. {
  173. struct subchannel *sch;
  174. sch = to_subchannel(cdev->dev.parent);
  175. sch->config.mme = mme;
  176. sch->config.mbfc = mbfc;
  177. /* address can be either a block address or a block index */
  178. if (mbfc)
  179. sch->config.mba = address;
  180. else
  181. sch->config.mbi = address;
  182. return cio_commit_config(sch);
  183. }
/*
 * Bookkeeping for a pending set_schib() retry; shared between
 * set_schib_wait() and retry_set_schib() through
 * cdev->private->cmb_wait, hence the reference count.
 */
struct set_schib_struct {
	u32 mme;		/* measurement mode enable to apply */
	int mbfc;		/* measurement block format control */
	unsigned long address;	/* block address or block index */
	wait_queue_head_t wait;	/* woken once ret is no longer pending */
	int ret;		/* CMF_PENDING or final result code */
	struct kref kref;	/* last put frees this struct */
};
  192. static void cmf_set_schib_release(struct kref *kref)
  193. {
  194. struct set_schib_struct *set_data;
  195. set_data = container_of(kref, struct set_schib_struct, kref);
  196. kfree(set_data);
  197. }
/* sentinel for set_schib_struct/copy_block_struct ->ret while a retry
 * is outstanding; distinct from 0 and from all -Exxx codes */
#define CMF_PENDING 1

/*
 * Apply a measurement configuration to the device, waiting for the
 * subchannel to become idle if necessary.
 *
 * If set_schib() reports -EBUSY and the device is online, the device
 * state is switched to DEV_STATE_CMFCHANGE and we sleep until
 * retry_set_schib() delivers a final result or a signal arrives.
 * Returns 0 on success or a negative error code.
 */
static int set_schib_wait(struct ccw_device *cdev, u32 mme,
			  int mbfc, unsigned long address)
{
	struct set_schib_struct *set_data;
	int ret;

	spin_lock_irq(cdev->ccwlock);
	if (!cdev->private->cmb) {
		/* no measurement block set up for this device */
		ret = -ENODEV;
		goto out;
	}
	/* GFP_ATOMIC: we hold the ccwlock spinlock here */
	set_data = kzalloc(sizeof(struct set_schib_struct), GFP_ATOMIC);
	if (!set_data) {
		ret = -ENOMEM;
		goto out;
	}
	init_waitqueue_head(&set_data->wait);
	kref_init(&set_data->kref);
	set_data->mme = mme;
	set_data->mbfc = mbfc;
	set_data->address = address;

	ret = set_schib(cdev, mme, mbfc, address);
	if (ret != -EBUSY)
		goto out_put;

	if (cdev->private->state != DEV_STATE_ONLINE) {
		/* if the device is not online, don't even try again */
		ret = -EBUSY;
		goto out_put;
	}

	/* publish set_data so retry_set_schib() can find it */
	cdev->private->state = DEV_STATE_CMFCHANGE;
	set_data->ret = CMF_PENDING;
	cdev->private->cmb_wait = set_data;

	spin_unlock_irq(cdev->ccwlock);
	if (wait_event_interruptible(set_data->wait,
				     set_data->ret != CMF_PENDING)) {
		/* interrupted by a signal: cancel the pending retry */
		spin_lock_irq(cdev->ccwlock);
		if (set_data->ret == CMF_PENDING) {
			set_data->ret = -ERESTARTSYS;
			if (cdev->private->state == DEV_STATE_CMFCHANGE)
				cdev->private->state = DEV_STATE_ONLINE;
		}
		spin_unlock_irq(cdev->ccwlock);
	}
	spin_lock_irq(cdev->ccwlock);
	cdev->private->cmb_wait = NULL;
	ret = set_data->ret;
out_put:
	kref_put(&set_data->kref, cmf_set_schib_release);
out:
	spin_unlock_irq(cdev->ccwlock);
	return ret;
}
/*
 * Retry a set_schib() that returned -EBUSY and wake the waiter in
 * set_schib_wait().  Expects cdev->private->cmb_wait to hold the
 * pending request (set up by set_schib_wait()).
 */
void retry_set_schib(struct ccw_device *cdev)
{
	struct set_schib_struct *set_data;

	set_data = cdev->private->cmb_wait;
	if (!set_data) {
		/* nobody is waiting for a retry */
		WARN_ON(1);
		return;
	}
	/* hold a reference while we touch set_data and wake the waiter */
	kref_get(&set_data->kref);
	set_data->ret = set_schib(cdev, set_data->mme, set_data->mbfc,
				  set_data->address);
	wake_up(&set_data->wait);
	kref_put(&set_data->kref, cmf_set_schib_release);
}
/*
 * Copy the hardware-updated measurement block into cmb_data->last_block
 * and stamp cmb_data->last_update.
 *
 * Returns -ENODEV if the subchannel is gone, -EBUSY while a start
 * function is in progress (the hardware may still be changing the
 * block), -ENOMEM on allocation failure, 0 on success.
 */
static int cmf_copy_block(struct ccw_device *cdev)
{
	struct subchannel *sch;
	void *reference_buf;
	void *hw_block;
	struct cmb_data *cmb_data;

	sch = to_subchannel(cdev->dev.parent);

	if (cio_update_schib(sch))
		return -ENODEV;

	if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) {
		/* Don't copy if a start function is in progress. */
		if ((!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_SUSPENDED)) &&
		    (scsw_actl(&sch->schib.scsw) &
		     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) &&
		    (!(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_SEC_STATUS)))
			return -EBUSY;
	}
	cmb_data = cdev->private->cmb;
	hw_block = cmbops->align(cmb_data->hw_block);
	if (!memcmp(cmb_data->last_block, hw_block, cmb_data->size))
		/* No need to copy. */
		return 0;
	reference_buf = kzalloc(cmb_data->size, GFP_ATOMIC);
	if (!reference_buf)
		return -ENOMEM;
	/*
	 * Ensure consistency of block copied from hardware: copy twice
	 * and repeat until both copies agree, i.e. the hardware did not
	 * update the block in the middle of a copy.
	 */
	do {
		memcpy(cmb_data->last_block, hw_block, cmb_data->size);
		memcpy(reference_buf, hw_block, cmb_data->size);
	} while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size));
	cmb_data->last_update = get_clock();
	kfree(reference_buf);
	return 0;
}
/*
 * Bookkeeping for a pending cmf_copy_block() retry; shared between
 * cmf_cmb_copy_wait() and cmf_retry_copy_block() through
 * cdev->private->cmb_wait, hence the reference count.
 */
struct copy_block_struct {
	wait_queue_head_t wait;	/* woken once ret is no longer pending */
	int ret;		/* CMF_PENDING or final result code */
	struct kref kref;	/* last put frees this struct */
};
  303. static void cmf_copy_block_release(struct kref *kref)
  304. {
  305. struct copy_block_struct *copy_block;
  306. copy_block = container_of(kref, struct copy_block_struct, kref);
  307. kfree(copy_block);
  308. }
/*
 * Copy the hardware measurement block, waiting for the subchannel to
 * become idle if necessary.  Mirrors the retry pattern used by
 * set_schib_wait(): on -EBUSY the device enters DEV_STATE_CMFUPDATE
 * and we sleep until cmf_retry_copy_block() delivers a result or a
 * signal arrives.  Returns 0 on success or a negative error code.
 */
static int cmf_cmb_copy_wait(struct ccw_device *cdev)
{
	struct copy_block_struct *copy_block;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(cdev->ccwlock, flags);
	if (!cdev->private->cmb) {
		/* no measurement block set up for this device */
		ret = -ENODEV;
		goto out;
	}
	/* GFP_ATOMIC: we hold the ccwlock spinlock here */
	copy_block = kzalloc(sizeof(struct copy_block_struct), GFP_ATOMIC);
	if (!copy_block) {
		ret = -ENOMEM;
		goto out;
	}
	init_waitqueue_head(&copy_block->wait);
	kref_init(&copy_block->kref);

	ret = cmf_copy_block(cdev);
	if (ret != -EBUSY)
		goto out_put;

	if (cdev->private->state != DEV_STATE_ONLINE) {
		ret = -EBUSY;
		goto out_put;
	}

	/* publish copy_block so cmf_retry_copy_block() can find it */
	cdev->private->state = DEV_STATE_CMFUPDATE;
	copy_block->ret = CMF_PENDING;
	cdev->private->cmb_wait = copy_block;

	spin_unlock_irqrestore(cdev->ccwlock, flags);
	if (wait_event_interruptible(copy_block->wait,
				     copy_block->ret != CMF_PENDING)) {
		/* interrupted by a signal: cancel the pending retry */
		spin_lock_irqsave(cdev->ccwlock, flags);
		if (copy_block->ret == CMF_PENDING) {
			copy_block->ret = -ERESTARTSYS;
			if (cdev->private->state == DEV_STATE_CMFUPDATE)
				cdev->private->state = DEV_STATE_ONLINE;
		}
		spin_unlock_irqrestore(cdev->ccwlock, flags);
	}
	spin_lock_irqsave(cdev->ccwlock, flags);
	cdev->private->cmb_wait = NULL;
	ret = copy_block->ret;
out_put:
	kref_put(&copy_block->kref, cmf_copy_block_release);
out:
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	return ret;
}
/*
 * Retry a cmf_copy_block() that returned -EBUSY and wake the waiter in
 * cmf_cmb_copy_wait().  Expects cdev->private->cmb_wait to hold the
 * pending request (set up by cmf_cmb_copy_wait()).
 */
void cmf_retry_copy_block(struct ccw_device *cdev)
{
	struct copy_block_struct *copy_block;

	copy_block = cdev->private->cmb_wait;
	if (!copy_block) {
		/* nobody is waiting for a retry */
		WARN_ON(1);
		return;
	}
	/* hold a reference while we touch copy_block and wake the waiter */
	kref_get(&copy_block->kref);
	copy_block->ret = cmf_copy_block(cdev);
	wake_up(&copy_block->wait);
	kref_put(&copy_block->kref, cmf_copy_block_release);
}
/*
 * Reset measurement data for a device: clear both the software copy
 * and the hardware block, and restart the measurement interval by
 * re-stamping cmb_start_time.  Shared by both block formats.
 */
static void cmf_generic_reset(struct ccw_device *cdev)
{
	struct cmb_data *cmb_data;

	spin_lock_irq(cdev->ccwlock);
	cmb_data = cdev->private->cmb;
	if (cmb_data) {
		memset(cmb_data->last_block, 0, cmb_data->size);
		/*
		 * Need to reset hw block as well to make the hardware start
		 * from 0 again.
		 */
		memset(cmbops->align(cmb_data->hw_block), 0, cmb_data->size);
		cmb_data->last_update = 0;
	}
	cdev->private->cmb_start_time = get_clock();
	spin_unlock_irq(cdev->ccwlock);
}
/**
 * struct cmb_area - container for global cmb data
 *
 * @mem:	  pointer to CMBs (only in basic measurement mode)
 * @list:	  contains a linked list of all subchannels
 * @num_channels: number of channels to be measured
 * @lock:	  protect concurrent access to @mem and @list
 */
struct cmb_area {
	struct cmb *mem;
	struct list_head list;
	int num_channels;
	spinlock_t lock;
};

/* the single system-wide cmb area (see struct cmb documentation below) */
static struct cmb_area cmb_area = {
	.lock = __SPIN_LOCK_UNLOCKED(cmb_area.lock),
	.list = LIST_HEAD_INIT(cmb_area.list),
	.num_channels = 1024,
};
/* ****** old style CMB handling ********/

/*
 * Basic channel measurement blocks are allocated in one contiguous
 * block of memory, which can not be moved as long as any channel
 * is active. Therefore, a maximum number of subchannels needs to
 * be defined somewhere. This is a module parameter, defaulting to
 * a reasonable value of 1024, or 32 kb of memory.
 * Current kernels don't allow kmalloc with more than 128kb, so the
 * maximum is 4096.
 */
module_param_named(maxchannels, cmb_area.num_channels, uint, 0444);
/**
 * struct cmb - basic channel measurement block
 * @ssch_rsch_count:	       number of ssch and rsch
 * @sample_count:	       number of samples
 * @device_connect_time:       time of device connect
 * @function_pending_time:     time of function pending
 * @device_disconnect_time:    time of device disconnect
 * @control_unit_queuing_time: time of control unit queuing
 * @device_active_only_time:   time of device active only
 * @reserved:		       unused in basic measurement mode
 *
 * The measurement block as used by the hardware. The fields are described
 * further in z/Architecture Principles of Operation, chapter 17.
 *
 * The cmb area made up from these blocks must be a contiguous array and may
 * not be reallocated or freed.
 * Only one cmb area can be present in the system.
 */
struct cmb {
	u16 ssch_rsch_count;
	u16 sample_count;
	u32 device_connect_time;
	u32 function_pending_time;
	u32 device_disconnect_time;
	u32 control_unit_queuing_time;
	u32 device_active_only_time;
	u32 reserved[2];
};
/*
 * Insert a single device into the cmb_area list.
 * Called with cmb_area.lock held from alloc_cmb.
 */
static int alloc_cmb_single(struct ccw_device *cdev,
			    struct cmb_data *cmb_data)
{
	struct cmb *cmb;
	struct ccw_device_private *node;
	int ret;

	spin_lock_irq(cdev->ccwlock);
	if (!list_empty(&cdev->private->cmb_list)) {
		/* the device is already on the list, i.e. has a block */
		ret = -EBUSY;
		goto out;
	}

	/*
	 * Find first unused cmb in cmb_area.mem.
	 * This is a little tricky: cmb_area.list
	 * remains sorted by ->cmb->hw_data pointers.
	 */
	cmb = cmb_area.mem;
	list_for_each_entry(node, &cmb_area.list, cmb_list) {
		struct cmb_data *data;
		data = node->cmb;
		if ((struct cmb*)data->hw_block > cmb)
			break;	/* found a gap: this slot is free */
		cmb++;
	}
	if (cmb - cmb_area.mem >= cmb_area.num_channels) {
		/* fixed-size area exhausted */
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * insert new cmb before node to keep the list sorted; if the
	 * loop above ran to completion, &node->cmb_list coincides with
	 * the list head (list_for_each_entry semantics), so the entry
	 * is appended at the end.
	 */
	list_add_tail(&cdev->private->cmb_list, &node->cmb_list);
	cmb_data->hw_block = cmb;
	cdev->private->cmb = cmb_data;
	ret = 0;
out:
	spin_unlock_irq(cdev->ccwlock);
	return ret;
}
/*
 * Allocate a basic-format measurement block for cdev, creating the
 * global contiguous cmb area and activating measurement on first use.
 * Returns 0 on success or a negative error code.
 */
static int alloc_cmb(struct ccw_device *cdev)
{
	int ret;
	struct cmb *mem;
	ssize_t size;
	struct cmb_data *cmb_data;

	/* Allocate private cmb_data. */
	cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL);
	if (!cmb_data)
		return -ENOMEM;

	cmb_data->last_block = kzalloc(sizeof(struct cmb), GFP_KERNEL);
	if (!cmb_data->last_block) {
		kfree(cmb_data);
		return -ENOMEM;
	}
	cmb_data->size = sizeof(struct cmb);
	spin_lock(&cmb_area.lock);

	if (!cmb_area.mem) {
		/* there is no user yet, so we need a new area */
		size = sizeof(struct cmb) * cmb_area.num_channels;
		WARN_ON(!list_empty(&cmb_area.list));

		/* drop the lock for the sleeping page allocation */
		spin_unlock(&cmb_area.lock);
		mem = (void*)__get_free_pages(GFP_KERNEL | GFP_DMA,
					      get_order(size));
		spin_lock(&cmb_area.lock);

		if (cmb_area.mem) {
			/* ok, another thread was faster */
			free_pages((unsigned long)mem, get_order(size));
		} else if (!mem) {
			/* no luck */
			ret = -ENOMEM;
			goto out;
		} else {
			/* everything ok */
			memset(mem, 0, size);
			cmb_area.mem = mem;
			cmf_activate(cmb_area.mem, 1);
		}
	}

	/* do the actual allocation */
	ret = alloc_cmb_single(cdev, cmb_data);
out:
	spin_unlock(&cmb_area.lock);
	if (ret) {
		kfree(cmb_data->last_block);
		kfree(cmb_data);
	}
	return ret;
}
/*
 * Release the basic-format measurement block of cdev.  When the last
 * user goes away, measurement is deactivated and the global area is
 * freed.  Lock order: cmb_area.lock outside ccwlock, matching
 * alloc_cmb()/alloc_cmb_single().
 */
static void free_cmb(struct ccw_device *cdev)
{
	struct ccw_device_private *priv;
	struct cmb_data *cmb_data;

	spin_lock(&cmb_area.lock);
	spin_lock_irq(cdev->ccwlock);

	priv = cdev->private;

	if (list_empty(&priv->cmb_list)) {
		/* already freed */
		goto out;
	}

	cmb_data = priv->cmb;
	priv->cmb = NULL;
	if (cmb_data)
		kfree(cmb_data->last_block);
	kfree(cmb_data);
	list_del_init(&priv->cmb_list);

	if (list_empty(&cmb_area.list)) {
		/* last device gone: deactivate and drop the global area */
		ssize_t size;
		size = sizeof(struct cmb) * cmb_area.num_channels;
		cmf_activate(NULL, 0);
		free_pages((unsigned long)cmb_area.mem, get_order(size));
		cmb_area.mem = NULL;
	}
out:
	spin_unlock_irq(cdev->ccwlock);
	spin_unlock(&cmb_area.lock);
}
  562. static int set_cmb(struct ccw_device *cdev, u32 mme)
  563. {
  564. u16 offset;
  565. struct cmb_data *cmb_data;
  566. unsigned long flags;
  567. spin_lock_irqsave(cdev->ccwlock, flags);
  568. if (!cdev->private->cmb) {
  569. spin_unlock_irqrestore(cdev->ccwlock, flags);
  570. return -EINVAL;
  571. }
  572. cmb_data = cdev->private->cmb;
  573. offset = mme ? (struct cmb *)cmb_data->hw_block - cmb_area.mem : 0;
  574. spin_unlock_irqrestore(cdev->ccwlock, flags);
  575. return set_schib_wait(cdev, mme, 0, offset);
  576. }
/*
 * Read one value from the basic measurement block: the raw counter for
 * the two count indices, or the average time per sample in nanoseconds
 * for the time indices.  Returns 0 on any error or unknown index.
 */
static u64 read_cmb(struct ccw_device *cdev, int index)
{
	struct cmb *cmb;
	u32 val;
	int ret;
	unsigned long flags;

	/* refresh last_block from the hardware first */
	ret = cmf_cmb_copy_wait(cdev);
	if (ret < 0)
		return 0;

	spin_lock_irqsave(cdev->ccwlock, flags);
	if (!cdev->private->cmb) {
		ret = 0;
		goto out;
	}
	cmb = ((struct cmb_data *)cdev->private->cmb)->last_block;

	switch (index) {
	case cmb_ssch_rsch_count:
		ret = cmb->ssch_rsch_count;
		goto out;
	case cmb_sample_count:
		ret = cmb->sample_count;
		goto out;
	case cmb_device_connect_time:
		val = cmb->device_connect_time;
		break;
	case cmb_function_pending_time:
		val = cmb->function_pending_time;
		break;
	case cmb_device_disconnect_time:
		val = cmb->device_disconnect_time;
		break;
	case cmb_control_unit_queuing_time:
		val = cmb->control_unit_queuing_time;
		break;
	case cmb_device_active_only_time:
		val = cmb->device_active_only_time;
		break;
	default:
		ret = 0;
		goto out;
	}
	/* time indices are reported as average nanoseconds per sample */
	ret = time_to_avg_nsec(val, cmb->sample_count);
out:
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	return ret;
}
/*
 * Fill a struct cmbdata from the basic measurement block, converting
 * all time fields to nanoseconds.  Returns -ENODEV if no block is
 * allocated, -EAGAIN if no sample has been copied yet, 0 on success.
 */
static int readall_cmb(struct ccw_device *cdev, struct cmbdata *data)
{
	struct cmb *cmb;
	struct cmb_data *cmb_data;
	u64 time;
	unsigned long flags;
	int ret;

	/* refresh last_block from the hardware first */
	ret = cmf_cmb_copy_wait(cdev);
	if (ret < 0)
		return ret;
	spin_lock_irqsave(cdev->ccwlock, flags);
	cmb_data = cdev->private->cmb;
	if (!cmb_data) {
		ret = -ENODEV;
		goto out;
	}
	if (cmb_data->last_update == 0) {
		/* hardware block has never been copied yet */
		ret = -EAGAIN;
		goto out;
	}
	cmb = cmb_data->last_block;
	time = cmb_data->last_update - cdev->private->cmb_start_time;

	memset(data, 0, sizeof(struct cmbdata));

	/* we only know values before device_busy_time */
	data->size = offsetof(struct cmbdata, device_busy_time);

	/* convert to nanoseconds (TOD delta: *1000 >> 12, i.e. /4096 us) */
	data->elapsed_time = (time * 1000) >> 12;

	/* copy data to new structure */
	data->ssch_rsch_count = cmb->ssch_rsch_count;
	data->sample_count = cmb->sample_count;

	/* time fields are converted to nanoseconds while copying */
	data->device_connect_time = time_to_nsec(cmb->device_connect_time);
	data->function_pending_time = time_to_nsec(cmb->function_pending_time);
	data->device_disconnect_time =
		time_to_nsec(cmb->device_disconnect_time);
	data->control_unit_queuing_time
		= time_to_nsec(cmb->control_unit_queuing_time);
	data->device_active_only_time
		= time_to_nsec(cmb->device_active_only_time);

	ret = 0;
out:
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	return ret;
}
/* cmb_operations.reset callback for the basic format. */
static void reset_cmb(struct ccw_device *cdev)
{
	cmf_generic_reset(cdev);
}
/*
 * cmb_operations.align callback for the basic format: blocks live in
 * the contiguous cmb_area, so no extra alignment is needed.
 */
static void * align_cmb(void *area)
{
	return area;
}
/* sysfs attributes shared by both formats; defined later in this file */
static struct attribute_group cmf_attr_group;

/* cmb_operations instance for the traditional basic format */
static struct cmb_operations cmbops_basic = {
	.alloc	= alloc_cmb,
	.free	= free_cmb,
	.set	= set_cmb,
	.read	= read_cmb,
	.readall    = readall_cmb,
	.reset	    = reset_cmb,
	.align	    = align_cmb,
	.attr_group = &cmf_attr_group,
};
/* ******** extended cmb handling ********/

/**
 * struct cmbe - extended channel measurement block
 * @ssch_rsch_count:		   number of ssch and rsch
 * @sample_count:		   number of samples
 * @device_connect_time:	   time of device connect
 * @function_pending_time:	   time of function pending
 * @device_disconnect_time:	   time of device disconnect
 * @control_unit_queuing_time:	   time of control unit queuing
 * @device_active_only_time:	   time of device active only
 * @device_busy_time:		   time of device busy
 * @initial_command_response_time: initial command response time
 * @reserved:			   unused
 *
 * The measurement block as used by the hardware. May be in any 64 bit physical
 * location.
 * The fields are described further in z/Architecture Principles of Operation,
 * third edition, chapter 17.
 */
struct cmbe {
	u32 ssch_rsch_count;
	u32 sample_count;
	u32 device_connect_time;
	u32 function_pending_time;
	u32 device_disconnect_time;
	u32 control_unit_queuing_time;
	u32 device_active_only_time;
	u32 device_busy_time;
	u32 initial_command_response_time;
	u32 reserved[7];
};
/*
 * kmalloc only guarantees 8 byte alignment, but we need cmbe
 * pointers to be naturally aligned. Make sure to allocate
 * enough space for two cmbes.
 */
static inline struct cmbe *cmbe_align(struct cmbe *c)
{
	unsigned long addr;
	/*
	 * Round c up to the next sizeof(struct cmbe) boundary within the
	 * double-sized allocation.  NOTE(review): the mask only clears
	 * the bits between sizeof(long) and sizeof(struct cmbe); this
	 * yields natural alignment only because c is already at least
	 * sizeof(long)-aligned (kmalloc guarantee above) — confirm if
	 * allocation strategy ever changes.
	 */
	addr = ((unsigned long)c + sizeof (struct cmbe) - sizeof(long)) &
			~(sizeof (struct cmbe) - sizeof(long));
	return (struct cmbe*)addr;
}
/*
 * Allocate an extended-format measurement block for cdev, activating
 * global measurement if this is the first measured channel.
 * Returns 0 on success or a negative error code.
 */
static int alloc_cmbe(struct ccw_device *cdev)
{
	struct cmbe *cmbe;
	struct cmb_data *cmb_data;
	int ret;

	/* double size so cmbe_align() can find an aligned block inside */
	cmbe = kzalloc (sizeof (*cmbe) * 2, GFP_KERNEL);
	if (!cmbe)
		return -ENOMEM;

	cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL);
	if (!cmb_data) {
		ret = -ENOMEM;
		goto out_free;
	}

	cmb_data->last_block = kzalloc(sizeof(struct cmbe), GFP_KERNEL);
	if (!cmb_data->last_block) {
		ret = -ENOMEM;
		goto out_free;
	}
	cmb_data->size = sizeof(struct cmbe);
	spin_lock_irq(cdev->ccwlock);
	if (cdev->private->cmb) {
		/* lost the race: the device already has a block */
		spin_unlock_irq(cdev->ccwlock);
		ret = -EBUSY;
		goto out_free;
	}
	cmb_data->hw_block = cmbe;
	cdev->private->cmb = cmb_data;
	spin_unlock_irq(cdev->ccwlock);

	/* activate global measurement if this is the first channel */
	spin_lock(&cmb_area.lock);
	if (list_empty(&cmb_area.list))
		cmf_activate(NULL, 1);
	list_add_tail(&cdev->private->cmb_list, &cmb_area.list);
	spin_unlock(&cmb_area.lock);

	return 0;
out_free:
	/* guard only the last_block deref; kfree(NULL) is a no-op */
	if (cmb_data)
		kfree(cmb_data->last_block);
	kfree(cmb_data);
	kfree(cmbe);
	return ret;
}
/*
 * Release the extended-format measurement block of cdev; deactivate
 * global measurement when the last measured channel goes away.
 * Note: the hw_block itself is freed by the caller/owner of hw_block
 * handling elsewhere — only cmb_data and last_block are freed here.
 */
static void free_cmbe(struct ccw_device *cdev)
{
	struct cmb_data *cmb_data;

	spin_lock_irq(cdev->ccwlock);
	cmb_data = cdev->private->cmb;
	cdev->private->cmb = NULL;
	if (cmb_data)
		kfree(cmb_data->last_block);
	kfree(cmb_data);
	spin_unlock_irq(cdev->ccwlock);

	/* deactivate global measurement if this is the last channel */
	spin_lock(&cmb_area.lock);
	list_del_init(&cdev->private->cmb_list);
	if (list_empty(&cmb_area.list))
		cmf_activate(NULL, 0);
	spin_unlock(&cmb_area.lock);
}
  788. static int set_cmbe(struct ccw_device *cdev, u32 mme)
  789. {
  790. unsigned long mba;
  791. struct cmb_data *cmb_data;
  792. unsigned long flags;
  793. spin_lock_irqsave(cdev->ccwlock, flags);
  794. if (!cdev->private->cmb) {
  795. spin_unlock_irqrestore(cdev->ccwlock, flags);
  796. return -EINVAL;
  797. }
  798. cmb_data = cdev->private->cmb;
  799. mba = mme ? (unsigned long) cmbe_align(cmb_data->hw_block) : 0;
  800. spin_unlock_irqrestore(cdev->ccwlock, flags);
  801. return set_schib_wait(cdev, mme, 1, mba);
  802. }
  803. static u64 read_cmbe(struct ccw_device *cdev, int index)
  804. {
  805. struct cmbe *cmb;
  806. struct cmb_data *cmb_data;
  807. u32 val;
  808. int ret;
  809. unsigned long flags;
  810. ret = cmf_cmb_copy_wait(cdev);
  811. if (ret < 0)
  812. return 0;
  813. spin_lock_irqsave(cdev->ccwlock, flags);
  814. cmb_data = cdev->private->cmb;
  815. if (!cmb_data) {
  816. ret = 0;
  817. goto out;
  818. }
  819. cmb = cmb_data->last_block;
  820. switch (index) {
  821. case cmb_ssch_rsch_count:
  822. ret = cmb->ssch_rsch_count;
  823. goto out;
  824. case cmb_sample_count:
  825. ret = cmb->sample_count;
  826. goto out;
  827. case cmb_device_connect_time:
  828. val = cmb->device_connect_time;
  829. break;
  830. case cmb_function_pending_time:
  831. val = cmb->function_pending_time;
  832. break;
  833. case cmb_device_disconnect_time:
  834. val = cmb->device_disconnect_time;
  835. break;
  836. case cmb_control_unit_queuing_time:
  837. val = cmb->control_unit_queuing_time;
  838. break;
  839. case cmb_device_active_only_time:
  840. val = cmb->device_active_only_time;
  841. break;
  842. case cmb_device_busy_time:
  843. val = cmb->device_busy_time;
  844. break;
  845. case cmb_initial_command_response_time:
  846. val = cmb->initial_command_response_time;
  847. break;
  848. default:
  849. ret = 0;
  850. goto out;
  851. }
  852. ret = time_to_avg_nsec(val, cmb->sample_count);
  853. out:
  854. spin_unlock_irqrestore(cdev->ccwlock, flags);
  855. return ret;
  856. }
/*
 * Copy the complete extended-format measurement block of @cdev into
 * the format-independent struct cmbdata.
 *
 * Returns 0 on success, -ENODEV if measurement is not active on the
 * device, -EAGAIN if no sample has been captured yet, or a negative
 * error from cmf_cmb_copy_wait().
 */
static int readall_cmbe(struct ccw_device *cdev, struct cmbdata *data)
{
	struct cmbe *cmb;
	struct cmb_data *cmb_data;
	u64 time;
	unsigned long flags;
	int ret;

	/* Make sure last_block reflects the current hardware block. */
	ret = cmf_cmb_copy_wait(cdev);
	if (ret < 0)
		return ret;
	spin_lock_irqsave(cdev->ccwlock, flags);
	cmb_data = cdev->private->cmb;
	if (!cmb_data) {
		ret = -ENODEV;
		goto out;
	}
	if (cmb_data->last_update == 0) {
		/* No sample has been captured since measurement started. */
		ret = -EAGAIN;
		goto out;
	}
	time = cmb_data->last_update - cdev->private->cmb_start_time;

	memset (data, 0, sizeof(struct cmbdata));

	/* we only know values before device_busy_time */
	data->size = offsetof(struct cmbdata, device_busy_time);

	/* convert to nanoseconds */
	data->elapsed_time = (time * 1000) >> 12;

	cmb = cmb_data->last_block;
	/* copy data to new structure */
	data->ssch_rsch_count = cmb->ssch_rsch_count;
	data->sample_count = cmb->sample_count;

	/* time fields are converted to nanoseconds while copying */
	data->device_connect_time = time_to_nsec(cmb->device_connect_time);
	data->function_pending_time = time_to_nsec(cmb->function_pending_time);
	data->device_disconnect_time =
		time_to_nsec(cmb->device_disconnect_time);
	data->control_unit_queuing_time
		= time_to_nsec(cmb->control_unit_queuing_time);
	data->device_active_only_time
		= time_to_nsec(cmb->device_active_only_time);
	data->device_busy_time = time_to_nsec(cmb->device_busy_time);
	data->initial_command_response_time
		= time_to_nsec(cmb->initial_command_response_time);

	ret = 0;
out:
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	return ret;
}
/* Reset callback for the extended format: clear the measurement data. */
static void reset_cmbe(struct ccw_device *cdev)
{
	cmf_generic_reset(cdev);
}

/* Align callback for the extended format: forward to cmbe_align(). */
static void * align_cmbe(void *area)
{
	return cmbe_align(area);
}
/* Defined further down; referenced by the extended operations table. */
static struct attribute_group cmf_attr_group_ext;

/* Operations for the extended channel-measurement-block format. */
static struct cmb_operations cmbops_extended = {
	.alloc = alloc_cmbe,
	.free = free_cmbe,
	.set = set_cmbe,
	.read = read_cmbe,
	.readall = readall_cmbe,
	.reset = reset_cmbe,
	.align = align_cmbe,
	.attr_group = &cmf_attr_group_ext,
};
  923. static ssize_t cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx)
  924. {
  925. return sprintf(buf, "%lld\n",
  926. (unsigned long long) cmf_read(to_ccwdev(dev), idx));
  927. }
  928. static ssize_t cmb_show_avg_sample_interval(struct device *dev,
  929. struct device_attribute *attr,
  930. char *buf)
  931. {
  932. struct ccw_device *cdev;
  933. long interval;
  934. unsigned long count;
  935. struct cmb_data *cmb_data;
  936. cdev = to_ccwdev(dev);
  937. count = cmf_read(cdev, cmb_sample_count);
  938. spin_lock_irq(cdev->ccwlock);
  939. cmb_data = cdev->private->cmb;
  940. if (count) {
  941. interval = cmb_data->last_update -
  942. cdev->private->cmb_start_time;
  943. interval = (interval * 1000) >> 12;
  944. interval /= count;
  945. } else
  946. interval = -1;
  947. spin_unlock_irq(cdev->ccwlock);
  948. return sprintf(buf, "%ld\n", interval);
  949. }
  950. static ssize_t cmb_show_avg_utilization(struct device *dev,
  951. struct device_attribute *attr,
  952. char *buf)
  953. {
  954. struct cmbdata data;
  955. u64 utilization;
  956. unsigned long t, u;
  957. int ret;
  958. ret = cmf_readall(to_ccwdev(dev), &data);
  959. if (ret == -EAGAIN || ret == -ENODEV)
  960. /* No data (yet/currently) available to use for calculation. */
  961. return sprintf(buf, "n/a\n");
  962. else if (ret)
  963. return ret;
  964. utilization = data.device_connect_time +
  965. data.function_pending_time +
  966. data.device_disconnect_time;
  967. /* shift to avoid long long division */
  968. while (-1ul < (data.elapsed_time | utilization)) {
  969. utilization >>= 8;
  970. data.elapsed_time >>= 8;
  971. }
  972. /* calculate value in 0.1 percent units */
  973. t = (unsigned long) data.elapsed_time / 1000;
  974. u = (unsigned long) utilization / t;
  975. return sprintf(buf, "%02ld.%01ld%%\n", u/ 10, u - (u/ 10) * 10);
  976. }
/*
 * Helper macros generating one sysfs show function plus a read-only
 * DEVICE_ATTR per measurement value.  cmf_attr() exposes the value
 * under its plain name, cmf_attr_avg() under an "avg_" prefix; both
 * route through cmb_show_attr()/cmf_read().
 */
#define cmf_attr(name) \
static ssize_t show_##name(struct device *dev, \
			   struct device_attribute *attr, char *buf) \
{ return cmb_show_attr((dev), buf, cmb_##name); } \
static DEVICE_ATTR(name, 0444, show_##name, NULL);

#define cmf_attr_avg(name) \
static ssize_t show_avg_##name(struct device *dev, \
			       struct device_attribute *attr, char *buf) \
{ return cmb_show_attr((dev), buf, cmb_##name); } \
static DEVICE_ATTR(avg_##name, 0444, show_avg_##name, NULL);

/* Plain counters. */
cmf_attr(ssch_rsch_count);
cmf_attr(sample_count);

/* Per-sample averages, computed by the read callback. */
cmf_attr_avg(device_connect_time);
cmf_attr_avg(function_pending_time);
cmf_attr_avg(device_disconnect_time);
cmf_attr_avg(control_unit_queuing_time);
cmf_attr_avg(device_active_only_time);
cmf_attr_avg(device_busy_time);
cmf_attr_avg(initial_command_response_time);

/* Derived values with dedicated show functions. */
static DEVICE_ATTR(avg_sample_interval, 0444, cmb_show_avg_sample_interval,
		   NULL);
static DEVICE_ATTR(avg_utilization, 0444, cmb_show_avg_utilization, NULL);
/* Attributes available with the basic measurement block format. */
static struct attribute *cmf_attributes[] = {
	&dev_attr_avg_sample_interval.attr,
	&dev_attr_avg_utilization.attr,
	&dev_attr_ssch_rsch_count.attr,
	&dev_attr_sample_count.attr,
	&dev_attr_avg_device_connect_time.attr,
	&dev_attr_avg_function_pending_time.attr,
	&dev_attr_avg_device_disconnect_time.attr,
	&dev_attr_avg_control_unit_queuing_time.attr,
	&dev_attr_avg_device_active_only_time.attr,
	NULL,
};

/* Exposed to userspace as the "cmf" group on the device. */
static struct attribute_group cmf_attr_group = {
	.name = "cmf",
	.attrs = cmf_attributes,
};
/*
 * Attributes available with the extended measurement block format:
 * the basic set plus device_busy_time and initial_command_response_time,
 * which only the extended block provides.
 */
static struct attribute *cmf_attributes_ext[] = {
	&dev_attr_avg_sample_interval.attr,
	&dev_attr_avg_utilization.attr,
	&dev_attr_ssch_rsch_count.attr,
	&dev_attr_sample_count.attr,
	&dev_attr_avg_device_connect_time.attr,
	&dev_attr_avg_function_pending_time.attr,
	&dev_attr_avg_device_disconnect_time.attr,
	&dev_attr_avg_control_unit_queuing_time.attr,
	&dev_attr_avg_device_active_only_time.attr,
	&dev_attr_avg_device_busy_time.attr,
	&dev_attr_avg_initial_command_response_time.attr,
	NULL,
};

/* Exposed to userspace as the "cmf" group on the device. */
static struct attribute_group cmf_attr_group_ext = {
	.name = "cmf",
	.attrs = cmf_attributes_ext,
};
  1033. static ssize_t cmb_enable_show(struct device *dev,
  1034. struct device_attribute *attr,
  1035. char *buf)
  1036. {
  1037. return sprintf(buf, "%d\n", to_ccwdev(dev)->private->cmb ? 1 : 0);
  1038. }
  1039. static ssize_t cmb_enable_store(struct device *dev,
  1040. struct device_attribute *attr, const char *buf,
  1041. size_t c)
  1042. {
  1043. struct ccw_device *cdev;
  1044. int ret;
  1045. unsigned long val;
  1046. ret = strict_strtoul(buf, 16, &val);
  1047. if (ret)
  1048. return ret;
  1049. cdev = to_ccwdev(dev);
  1050. switch (val) {
  1051. case 0:
  1052. ret = disable_cmf(cdev);
  1053. break;
  1054. case 1:
  1055. ret = enable_cmf(cdev);
  1056. break;
  1057. }
  1058. return c;
  1059. }
  1060. DEVICE_ATTR(cmb_enable, 0644, cmb_enable_show, cmb_enable_store);
/*
 * Switch measurement on or off for @cdev without touching sysfs,
 * using the same mme value (2) that enable_cmf() uses for activation.
 * Returns the result of the format-specific set operation.
 */
int ccw_set_cmf(struct ccw_device *cdev, int enable)
{
	return cmbops->set(cdev, enable ? 2 : 0);
}
  1065. /**
  1066. * enable_cmf() - switch on the channel measurement for a specific device
  1067. * @cdev: The ccw device to be enabled
  1068. *
  1069. * Returns %0 for success or a negative error value.
  1070. *
  1071. * Context:
  1072. * non-atomic
  1073. */
  1074. int enable_cmf(struct ccw_device *cdev)
  1075. {
  1076. int ret;
  1077. ret = cmbops->alloc(cdev);
  1078. cmbops->reset(cdev);
  1079. if (ret)
  1080. return ret;
  1081. ret = cmbops->set(cdev, 2);
  1082. if (ret) {
  1083. cmbops->free(cdev);
  1084. return ret;
  1085. }
  1086. ret = sysfs_create_group(&cdev->dev.kobj, cmbops->attr_group);
  1087. if (!ret)
  1088. return 0;
  1089. cmbops->set(cdev, 0); //FIXME: this can fail
  1090. cmbops->free(cdev);
  1091. return ret;
  1092. }
  1093. /**
  1094. * disable_cmf() - switch off the channel measurement for a specific device
  1095. * @cdev: The ccw device to be disabled
  1096. *
  1097. * Returns %0 for success or a negative error value.
  1098. *
  1099. * Context:
  1100. * non-atomic
  1101. */
  1102. int disable_cmf(struct ccw_device *cdev)
  1103. {
  1104. int ret;
  1105. ret = cmbops->set(cdev, 0);
  1106. if (ret)
  1107. return ret;
  1108. cmbops->free(cdev);
  1109. sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
  1110. return ret;
  1111. }
  1112. /**
  1113. * cmf_read() - read one value from the current channel measurement block
  1114. * @cdev: the channel to be read
  1115. * @index: the index of the value to be read
  1116. *
  1117. * Returns the value read or %0 if the value cannot be read.
  1118. *
  1119. * Context:
  1120. * any
  1121. */
  1122. u64 cmf_read(struct ccw_device *cdev, int index)
  1123. {
  1124. return cmbops->read(cdev, index);
  1125. }
  1126. /**
  1127. * cmf_readall() - read the current channel measurement block
  1128. * @cdev: the channel to be read
  1129. * @data: a pointer to a data block that will be filled
  1130. *
  1131. * Returns %0 on success, a negative error value otherwise.
  1132. *
  1133. * Context:
  1134. * any
  1135. */
  1136. int cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
  1137. {
  1138. return cmbops->readall(cdev, data);
  1139. }
/* Reenable cmf when a disconnected device becomes available again. */
int cmf_reenable(struct ccw_device *cdev)
{
	/* Discard stale data from before the disconnect, then turn
	 * measurement back on with the same mme value enable_cmf() uses. */
	cmbops->reset(cdev);
	return cmbops->set(cdev, 2);
}
  1146. static int __init init_cmf(void)
  1147. {
  1148. char *format_string;
  1149. char *detect_string = "parameter";
  1150. /*
  1151. * If the user did not give a parameter, see if we are running on a
  1152. * machine supporting extended measurement blocks, otherwise fall back
  1153. * to basic mode.
  1154. */
  1155. if (format == CMF_AUTODETECT) {
  1156. if (!css_general_characteristics.ext_mb) {
  1157. format = CMF_BASIC;
  1158. } else {
  1159. format = CMF_EXTENDED;
  1160. }
  1161. detect_string = "autodetected";
  1162. } else {
  1163. detect_string = "parameter";
  1164. }
  1165. switch (format) {
  1166. case CMF_BASIC:
  1167. format_string = "basic";
  1168. cmbops = &cmbops_basic;
  1169. break;
  1170. case CMF_EXTENDED:
  1171. format_string = "extended";
  1172. cmbops = &cmbops_extended;
  1173. break;
  1174. default:
  1175. return 1;
  1176. }
  1177. pr_info("Channel measurement facility initialized using format "
  1178. "%s (mode %s)\n", format_string, detect_string);
  1179. return 0;
  1180. }
module_init(init_cmf);

/* Module metadata and the public cmf interface for other ccw drivers. */
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("channel measurement facility base driver\n"
		   "Copyright 2003 IBM Corporation\n");

EXPORT_SYMBOL_GPL(enable_cmf);
EXPORT_SYMBOL_GPL(disable_cmf);
EXPORT_SYMBOL_GPL(cmf_read);
EXPORT_SYMBOL_GPL(cmf_readall);