cmf.c 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325
  1. /*
  2. * linux/drivers/s390/cio/cmf.c
  3. *
  4. * Linux on zSeries Channel Measurement Facility support
  5. *
  6. * Copyright 2000,2006 IBM Corporation
  7. *
  8. * Authors: Arnd Bergmann <arndb@de.ibm.com>
  9. * Cornelia Huck <cornelia.huck@de.ibm.com>
  10. *
  11. * original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com>
  12. *
  13. * This program is free software; you can redistribute it and/or modify
  14. * it under the terms of the GNU General Public License as published by
  15. * the Free Software Foundation; either version 2, or (at your option)
  16. * any later version.
  17. *
  18. * This program is distributed in the hope that it will be useful,
  19. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  20. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  21. * GNU General Public License for more details.
  22. *
  23. * You should have received a copy of the GNU General Public License
  24. * along with this program; if not, write to the Free Software
  25. * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  26. */
  27. #include <linux/bootmem.h>
  28. #include <linux/device.h>
  29. #include <linux/init.h>
  30. #include <linux/list.h>
  31. #include <linux/module.h>
  32. #include <linux/moduleparam.h>
  33. #include <linux/slab.h>
  34. #include <linux/timex.h> /* get_clock() */
  35. #include <asm/ccwdev.h>
  36. #include <asm/cio.h>
  37. #include <asm/cmb.h>
  38. #include <asm/div64.h>
  39. #include "cio.h"
  40. #include "css.h"
  41. #include "device.h"
  42. #include "ioasm.h"
  43. #include "chsc.h"
  44. /*
  45. * parameter to enable cmf during boot, possible uses are:
  46. * "s390cmf" -- enable cmf and allocate 2 MB of ram so measuring can be
  47. * used on any subchannel
  48. * "s390cmf=<num>" -- enable cmf and allocate enough memory to measure
  49. * <num> subchannel, where <num> is an integer
  50. * between 1 and 65535, default is 1024
  51. */
  52. #define ARGSTRING "s390cmf"
  53. /* indices for READCMB */
  54. enum cmb_index {
  55. /* basic and exended format: */
  56. cmb_ssch_rsch_count,
  57. cmb_sample_count,
  58. cmb_device_connect_time,
  59. cmb_function_pending_time,
  60. cmb_device_disconnect_time,
  61. cmb_control_unit_queuing_time,
  62. cmb_device_active_only_time,
  63. /* extended format only: */
  64. cmb_device_busy_time,
  65. cmb_initial_command_response_time,
  66. };
  67. /**
  68. * enum cmb_format - types of supported measurement block formats
  69. *
  70. * @CMF_BASIC: traditional channel measurement blocks supported
  71. * by all machines that we run on
  72. * @CMF_EXTENDED: improved format that was introduced with the z990
  73. * machine
  74. * @CMF_AUTODETECT: default: use extended format when running on a z990
  75. * or later machine, otherwise fall back to basic format
  76. **/
  77. enum cmb_format {
  78. CMF_BASIC,
  79. CMF_EXTENDED,
  80. CMF_AUTODETECT = -1,
  81. };
  82. /**
  83. * format - actual format for all measurement blocks
  84. *
  85. * The format module parameter can be set to a value of 0 (zero)
  86. * or 1, indicating basic or extended format as described for
  87. * enum cmb_format.
  88. */
  89. static int format = CMF_AUTODETECT;
  90. module_param(format, bool, 0444);
  91. /**
  92. * struct cmb_operations - functions to use depending on cmb_format
  93. *
  94. * Most of these functions operate on a struct ccw_device. There is only
  95. * one instance of struct cmb_operations because the format of the measurement
  96. * data is guaranteed to be the same for every ccw_device.
  97. *
  98. * @alloc: allocate memory for a channel measurement block,
  99. * either with the help of a special pool or with kmalloc
  100. * @free: free memory allocated with @alloc
  101. * @set: enable or disable measurement
  102. * @readall: read a measurement block in a common format
  103. * @reset: clear the data in the associated measurement block and
  104. * reset its time stamp
  105. * @align: align an allocated block so that the hardware can use it
  106. */
  107. struct cmb_operations {
  108. int (*alloc) (struct ccw_device *);
  109. void (*free) (struct ccw_device *);
  110. int (*set) (struct ccw_device *, u32);
  111. u64 (*read) (struct ccw_device *, int);
  112. int (*readall)(struct ccw_device *, struct cmbdata *);
  113. void (*reset) (struct ccw_device *);
  114. void *(*align) (void *);
  115. struct attribute_group *attr_group;
  116. };
  117. static struct cmb_operations *cmbops;
  118. struct cmb_data {
  119. void *hw_block; /* Pointer to block updated by hardware */
  120. void *last_block; /* Last changed block copied from hardware block */
  121. int size; /* Size of hw_block and last_block */
  122. unsigned long long last_update; /* when last_block was updated */
  123. };
  124. /*
  125. * Our user interface is designed in terms of nanoseconds,
  126. * while the hardware measures total times in its own
  127. * unit.
  128. */
  129. static inline u64 time_to_nsec(u32 value)
  130. {
  131. return ((u64)value) * 128000ull;
  132. }
  133. /*
  134. * Users are usually interested in average times,
  135. * not accumulated time.
  136. * This also helps us with atomicity problems
  137. * when reading sinlge values.
  138. */
  139. static inline u64 time_to_avg_nsec(u32 value, u32 count)
  140. {
  141. u64 ret;
  142. /* no samples yet, avoid division by 0 */
  143. if (count == 0)
  144. return 0;
  145. /* value comes in units of 128 µsec */
  146. ret = time_to_nsec(value);
  147. do_div(ret, count);
  148. return ret;
  149. }
  150. /*
  151. * Activate or deactivate the channel monitor. When area is NULL,
  152. * the monitor is deactivated. The channel monitor needs to
  153. * be active in order to measure subchannels, which also need
  154. * to be enabled.
  155. */
  156. static inline void cmf_activate(void *area, unsigned int onoff)
  157. {
  158. register void * __gpr2 asm("2");
  159. register long __gpr1 asm("1");
  160. __gpr2 = area;
  161. __gpr1 = onoff ? 2 : 0;
  162. /* activate channel measurement */
  163. asm("schm" : : "d" (__gpr2), "d" (__gpr1) );
  164. }
  165. static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc,
  166. unsigned long address)
  167. {
  168. int ret;
  169. int retry;
  170. struct subchannel *sch;
  171. struct schib *schib;
  172. sch = to_subchannel(cdev->dev.parent);
  173. schib = &sch->schib;
  174. /* msch can silently fail, so do it again if necessary */
  175. for (retry = 0; retry < 3; retry++) {
  176. /* prepare schib */
  177. stsch(sch->schid, schib);
  178. schib->pmcw.mme = mme;
  179. schib->pmcw.mbfc = mbfc;
  180. /* address can be either a block address or a block index */
  181. if (mbfc)
  182. schib->mba = address;
  183. else
  184. schib->pmcw.mbi = address;
  185. /* try to submit it */
  186. switch(ret = msch_err(sch->schid, schib)) {
  187. case 0:
  188. break;
  189. case 1:
  190. case 2: /* in I/O or status pending */
  191. ret = -EBUSY;
  192. break;
  193. case 3: /* subchannel is no longer valid */
  194. ret = -ENODEV;
  195. break;
  196. default: /* msch caught an exception */
  197. ret = -EINVAL;
  198. break;
  199. }
  200. stsch(sch->schid, schib); /* restore the schib */
  201. if (ret)
  202. break;
  203. /* check if it worked */
  204. if (schib->pmcw.mme == mme &&
  205. schib->pmcw.mbfc == mbfc &&
  206. (mbfc ? (schib->mba == address)
  207. : (schib->pmcw.mbi == address)))
  208. return 0;
  209. ret = -EINVAL;
  210. }
  211. return ret;
  212. }
  213. struct set_schib_struct {
  214. u32 mme;
  215. int mbfc;
  216. unsigned long address;
  217. wait_queue_head_t wait;
  218. int ret;
  219. struct kref kref;
  220. };
  221. static void cmf_set_schib_release(struct kref *kref)
  222. {
  223. struct set_schib_struct *set_data;
  224. set_data = container_of(kref, struct set_schib_struct, kref);
  225. kfree(set_data);
  226. }
  227. #define CMF_PENDING 1
  228. static int set_schib_wait(struct ccw_device *cdev, u32 mme,
  229. int mbfc, unsigned long address)
  230. {
  231. struct set_schib_struct *set_data;
  232. int ret;
  233. spin_lock_irq(cdev->ccwlock);
  234. if (!cdev->private->cmb) {
  235. ret = -ENODEV;
  236. goto out;
  237. }
  238. set_data = kzalloc(sizeof(struct set_schib_struct), GFP_ATOMIC);
  239. if (!set_data) {
  240. ret = -ENOMEM;
  241. goto out;
  242. }
  243. init_waitqueue_head(&set_data->wait);
  244. kref_init(&set_data->kref);
  245. set_data->mme = mme;
  246. set_data->mbfc = mbfc;
  247. set_data->address = address;
  248. ret = set_schib(cdev, mme, mbfc, address);
  249. if (ret != -EBUSY)
  250. goto out_put;
  251. if (cdev->private->state != DEV_STATE_ONLINE) {
  252. /* if the device is not online, don't even try again */
  253. ret = -EBUSY;
  254. goto out_put;
  255. }
  256. cdev->private->state = DEV_STATE_CMFCHANGE;
  257. set_data->ret = CMF_PENDING;
  258. cdev->private->cmb_wait = set_data;
  259. spin_unlock_irq(cdev->ccwlock);
  260. if (wait_event_interruptible(set_data->wait,
  261. set_data->ret != CMF_PENDING)) {
  262. spin_lock_irq(cdev->ccwlock);
  263. if (set_data->ret == CMF_PENDING) {
  264. set_data->ret = -ERESTARTSYS;
  265. if (cdev->private->state == DEV_STATE_CMFCHANGE)
  266. cdev->private->state = DEV_STATE_ONLINE;
  267. }
  268. spin_unlock_irq(cdev->ccwlock);
  269. }
  270. spin_lock_irq(cdev->ccwlock);
  271. cdev->private->cmb_wait = NULL;
  272. ret = set_data->ret;
  273. out_put:
  274. kref_put(&set_data->kref, cmf_set_schib_release);
  275. out:
  276. spin_unlock_irq(cdev->ccwlock);
  277. return ret;
  278. }
  279. void retry_set_schib(struct ccw_device *cdev)
  280. {
  281. struct set_schib_struct *set_data;
  282. set_data = cdev->private->cmb_wait;
  283. if (!set_data) {
  284. WARN_ON(1);
  285. return;
  286. }
  287. kref_get(&set_data->kref);
  288. set_data->ret = set_schib(cdev, set_data->mme, set_data->mbfc,
  289. set_data->address);
  290. wake_up(&set_data->wait);
  291. kref_put(&set_data->kref, cmf_set_schib_release);
  292. }
  293. static int cmf_copy_block(struct ccw_device *cdev)
  294. {
  295. struct subchannel *sch;
  296. void *reference_buf;
  297. void *hw_block;
  298. struct cmb_data *cmb_data;
  299. sch = to_subchannel(cdev->dev.parent);
  300. if (stsch(sch->schid, &sch->schib))
  301. return -ENODEV;
  302. if (sch->schib.scsw.fctl & SCSW_FCTL_START_FUNC) {
  303. /* Don't copy if a start function is in progress. */
  304. if ((!sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED) &&
  305. (sch->schib.scsw.actl &
  306. (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) &&
  307. (!sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS))
  308. return -EBUSY;
  309. }
  310. cmb_data = cdev->private->cmb;
  311. hw_block = cmbops->align(cmb_data->hw_block);
  312. if (!memcmp(cmb_data->last_block, hw_block, cmb_data->size))
  313. /* No need to copy. */
  314. return 0;
  315. reference_buf = kzalloc(cmb_data->size, GFP_ATOMIC);
  316. if (!reference_buf)
  317. return -ENOMEM;
  318. /* Ensure consistency of block copied from hardware. */
  319. do {
  320. memcpy(cmb_data->last_block, hw_block, cmb_data->size);
  321. memcpy(reference_buf, hw_block, cmb_data->size);
  322. } while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size));
  323. cmb_data->last_update = get_clock();
  324. kfree(reference_buf);
  325. return 0;
  326. }
  327. struct copy_block_struct {
  328. wait_queue_head_t wait;
  329. int ret;
  330. struct kref kref;
  331. };
  332. static void cmf_copy_block_release(struct kref *kref)
  333. {
  334. struct copy_block_struct *copy_block;
  335. copy_block = container_of(kref, struct copy_block_struct, kref);
  336. kfree(copy_block);
  337. }
  338. static int cmf_cmb_copy_wait(struct ccw_device *cdev)
  339. {
  340. struct copy_block_struct *copy_block;
  341. int ret;
  342. unsigned long flags;
  343. spin_lock_irqsave(cdev->ccwlock, flags);
  344. if (!cdev->private->cmb) {
  345. ret = -ENODEV;
  346. goto out;
  347. }
  348. copy_block = kzalloc(sizeof(struct copy_block_struct), GFP_ATOMIC);
  349. if (!copy_block) {
  350. ret = -ENOMEM;
  351. goto out;
  352. }
  353. init_waitqueue_head(&copy_block->wait);
  354. kref_init(&copy_block->kref);
  355. ret = cmf_copy_block(cdev);
  356. if (ret != -EBUSY)
  357. goto out_put;
  358. if (cdev->private->state != DEV_STATE_ONLINE) {
  359. ret = -EBUSY;
  360. goto out_put;
  361. }
  362. cdev->private->state = DEV_STATE_CMFUPDATE;
  363. copy_block->ret = CMF_PENDING;
  364. cdev->private->cmb_wait = copy_block;
  365. spin_unlock_irqrestore(cdev->ccwlock, flags);
  366. if (wait_event_interruptible(copy_block->wait,
  367. copy_block->ret != CMF_PENDING)) {
  368. spin_lock_irqsave(cdev->ccwlock, flags);
  369. if (copy_block->ret == CMF_PENDING) {
  370. copy_block->ret = -ERESTARTSYS;
  371. if (cdev->private->state == DEV_STATE_CMFUPDATE)
  372. cdev->private->state = DEV_STATE_ONLINE;
  373. }
  374. spin_unlock_irqrestore(cdev->ccwlock, flags);
  375. }
  376. spin_lock_irqsave(cdev->ccwlock, flags);
  377. cdev->private->cmb_wait = NULL;
  378. ret = copy_block->ret;
  379. out_put:
  380. kref_put(&copy_block->kref, cmf_copy_block_release);
  381. out:
  382. spin_unlock_irqrestore(cdev->ccwlock, flags);
  383. return ret;
  384. }
  385. void cmf_retry_copy_block(struct ccw_device *cdev)
  386. {
  387. struct copy_block_struct *copy_block;
  388. copy_block = cdev->private->cmb_wait;
  389. if (!copy_block) {
  390. WARN_ON(1);
  391. return;
  392. }
  393. kref_get(&copy_block->kref);
  394. copy_block->ret = cmf_copy_block(cdev);
  395. wake_up(&copy_block->wait);
  396. kref_put(&copy_block->kref, cmf_copy_block_release);
  397. }
  398. static void cmf_generic_reset(struct ccw_device *cdev)
  399. {
  400. struct cmb_data *cmb_data;
  401. spin_lock_irq(cdev->ccwlock);
  402. cmb_data = cdev->private->cmb;
  403. if (cmb_data) {
  404. memset(cmb_data->last_block, 0, cmb_data->size);
  405. /*
  406. * Need to reset hw block as well to make the hardware start
  407. * from 0 again.
  408. */
  409. memset(cmbops->align(cmb_data->hw_block), 0, cmb_data->size);
  410. cmb_data->last_update = 0;
  411. }
  412. cdev->private->cmb_start_time = get_clock();
  413. spin_unlock_irq(cdev->ccwlock);
  414. }
  415. /**
  416. * struct cmb_area - container for global cmb data
  417. *
  418. * @mem: pointer to CMBs (only in basic measurement mode)
  419. * @list: contains a linked list of all subchannels
  420. * @lock: protect concurrent access to @mem and @list
  421. */
  422. struct cmb_area {
  423. struct cmb *mem;
  424. struct list_head list;
  425. int num_channels;
  426. spinlock_t lock;
  427. };
  428. static struct cmb_area cmb_area = {
  429. .lock = __SPIN_LOCK_UNLOCKED(cmb_area.lock),
  430. .list = LIST_HEAD_INIT(cmb_area.list),
  431. .num_channels = 1024,
  432. };
  433. /* ****** old style CMB handling ********/
  434. /*
  435. * Basic channel measurement blocks are allocated in one contiguous
  436. * block of memory, which can not be moved as long as any channel
  437. * is active. Therefore, a maximum number of subchannels needs to
  438. * be defined somewhere. This is a module parameter, defaulting to
  439. * a resonable value of 1024, or 32 kb of memory.
  440. * Current kernels don't allow kmalloc with more than 128kb, so the
  441. * maximum is 4096.
  442. */
  443. module_param_named(maxchannels, cmb_area.num_channels, uint, 0444);
  444. /**
  445. * struct cmb - basic channel measurement block
  446. *
  447. * cmb as used by the hardware the fields are described in z/Architecture
  448. * Principles of Operation, chapter 17.
  449. * The area to be a contiguous array and may not be reallocated or freed.
  450. * Only one cmb area can be present in the system.
  451. */
  452. struct cmb {
  453. u16 ssch_rsch_count;
  454. u16 sample_count;
  455. u32 device_connect_time;
  456. u32 function_pending_time;
  457. u32 device_disconnect_time;
  458. u32 control_unit_queuing_time;
  459. u32 device_active_only_time;
  460. u32 reserved[2];
  461. };
  462. /*
  463. * Insert a single device into the cmb_area list.
  464. * Called with cmb_area.lock held from alloc_cmb.
  465. */
  466. static int alloc_cmb_single(struct ccw_device *cdev,
  467. struct cmb_data *cmb_data)
  468. {
  469. struct cmb *cmb;
  470. struct ccw_device_private *node;
  471. int ret;
  472. spin_lock_irq(cdev->ccwlock);
  473. if (!list_empty(&cdev->private->cmb_list)) {
  474. ret = -EBUSY;
  475. goto out;
  476. }
  477. /*
  478. * Find first unused cmb in cmb_area.mem.
  479. * This is a little tricky: cmb_area.list
  480. * remains sorted by ->cmb->hw_data pointers.
  481. */
  482. cmb = cmb_area.mem;
  483. list_for_each_entry(node, &cmb_area.list, cmb_list) {
  484. struct cmb_data *data;
  485. data = node->cmb;
  486. if ((struct cmb*)data->hw_block > cmb)
  487. break;
  488. cmb++;
  489. }
  490. if (cmb - cmb_area.mem >= cmb_area.num_channels) {
  491. ret = -ENOMEM;
  492. goto out;
  493. }
  494. /* insert new cmb */
  495. list_add_tail(&cdev->private->cmb_list, &node->cmb_list);
  496. cmb_data->hw_block = cmb;
  497. cdev->private->cmb = cmb_data;
  498. ret = 0;
  499. out:
  500. spin_unlock_irq(cdev->ccwlock);
  501. return ret;
  502. }
  503. static int alloc_cmb(struct ccw_device *cdev)
  504. {
  505. int ret;
  506. struct cmb *mem;
  507. ssize_t size;
  508. struct cmb_data *cmb_data;
  509. /* Allocate private cmb_data. */
  510. cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL);
  511. if (!cmb_data)
  512. return -ENOMEM;
  513. cmb_data->last_block = kzalloc(sizeof(struct cmb), GFP_KERNEL);
  514. if (!cmb_data->last_block) {
  515. kfree(cmb_data);
  516. return -ENOMEM;
  517. }
  518. cmb_data->size = sizeof(struct cmb);
  519. spin_lock(&cmb_area.lock);
  520. if (!cmb_area.mem) {
  521. /* there is no user yet, so we need a new area */
  522. size = sizeof(struct cmb) * cmb_area.num_channels;
  523. WARN_ON(!list_empty(&cmb_area.list));
  524. spin_unlock(&cmb_area.lock);
  525. mem = (void*)__get_free_pages(GFP_KERNEL | GFP_DMA,
  526. get_order(size));
  527. spin_lock(&cmb_area.lock);
  528. if (cmb_area.mem) {
  529. /* ok, another thread was faster */
  530. free_pages((unsigned long)mem, get_order(size));
  531. } else if (!mem) {
  532. /* no luck */
  533. printk(KERN_WARNING "cio: failed to allocate area "
  534. "for measuring %d subchannels\n",
  535. cmb_area.num_channels);
  536. ret = -ENOMEM;
  537. goto out;
  538. } else {
  539. /* everything ok */
  540. memset(mem, 0, size);
  541. cmb_area.mem = mem;
  542. cmf_activate(cmb_area.mem, 1);
  543. }
  544. }
  545. /* do the actual allocation */
  546. ret = alloc_cmb_single(cdev, cmb_data);
  547. out:
  548. spin_unlock(&cmb_area.lock);
  549. if (ret) {
  550. kfree(cmb_data->last_block);
  551. kfree(cmb_data);
  552. }
  553. return ret;
  554. }
  555. static void free_cmb(struct ccw_device *cdev)
  556. {
  557. struct ccw_device_private *priv;
  558. struct cmb_data *cmb_data;
  559. spin_lock(&cmb_area.lock);
  560. spin_lock_irq(cdev->ccwlock);
  561. priv = cdev->private;
  562. if (list_empty(&priv->cmb_list)) {
  563. /* already freed */
  564. goto out;
  565. }
  566. cmb_data = priv->cmb;
  567. priv->cmb = NULL;
  568. if (cmb_data)
  569. kfree(cmb_data->last_block);
  570. kfree(cmb_data);
  571. list_del_init(&priv->cmb_list);
  572. if (list_empty(&cmb_area.list)) {
  573. ssize_t size;
  574. size = sizeof(struct cmb) * cmb_area.num_channels;
  575. cmf_activate(NULL, 0);
  576. free_pages((unsigned long)cmb_area.mem, get_order(size));
  577. cmb_area.mem = NULL;
  578. }
  579. out:
  580. spin_unlock_irq(cdev->ccwlock);
  581. spin_unlock(&cmb_area.lock);
  582. }
  583. static int set_cmb(struct ccw_device *cdev, u32 mme)
  584. {
  585. u16 offset;
  586. struct cmb_data *cmb_data;
  587. unsigned long flags;
  588. spin_lock_irqsave(cdev->ccwlock, flags);
  589. if (!cdev->private->cmb) {
  590. spin_unlock_irqrestore(cdev->ccwlock, flags);
  591. return -EINVAL;
  592. }
  593. cmb_data = cdev->private->cmb;
  594. offset = mme ? (struct cmb *)cmb_data->hw_block - cmb_area.mem : 0;
  595. spin_unlock_irqrestore(cdev->ccwlock, flags);
  596. return set_schib_wait(cdev, mme, 0, offset);
  597. }
  598. static u64 read_cmb(struct ccw_device *cdev, int index)
  599. {
  600. struct cmb *cmb;
  601. u32 val;
  602. int ret;
  603. unsigned long flags;
  604. ret = cmf_cmb_copy_wait(cdev);
  605. if (ret < 0)
  606. return 0;
  607. spin_lock_irqsave(cdev->ccwlock, flags);
  608. if (!cdev->private->cmb) {
  609. ret = 0;
  610. goto out;
  611. }
  612. cmb = ((struct cmb_data *)cdev->private->cmb)->last_block;
  613. switch (index) {
  614. case cmb_ssch_rsch_count:
  615. ret = cmb->ssch_rsch_count;
  616. goto out;
  617. case cmb_sample_count:
  618. ret = cmb->sample_count;
  619. goto out;
  620. case cmb_device_connect_time:
  621. val = cmb->device_connect_time;
  622. break;
  623. case cmb_function_pending_time:
  624. val = cmb->function_pending_time;
  625. break;
  626. case cmb_device_disconnect_time:
  627. val = cmb->device_disconnect_time;
  628. break;
  629. case cmb_control_unit_queuing_time:
  630. val = cmb->control_unit_queuing_time;
  631. break;
  632. case cmb_device_active_only_time:
  633. val = cmb->device_active_only_time;
  634. break;
  635. default:
  636. ret = 0;
  637. goto out;
  638. }
  639. ret = time_to_avg_nsec(val, cmb->sample_count);
  640. out:
  641. spin_unlock_irqrestore(cdev->ccwlock, flags);
  642. return ret;
  643. }
  644. static int readall_cmb(struct ccw_device *cdev, struct cmbdata *data)
  645. {
  646. struct cmb *cmb;
  647. struct cmb_data *cmb_data;
  648. u64 time;
  649. unsigned long flags;
  650. int ret;
  651. ret = cmf_cmb_copy_wait(cdev);
  652. if (ret < 0)
  653. return ret;
  654. spin_lock_irqsave(cdev->ccwlock, flags);
  655. cmb_data = cdev->private->cmb;
  656. if (!cmb_data) {
  657. ret = -ENODEV;
  658. goto out;
  659. }
  660. if (cmb_data->last_update == 0) {
  661. ret = -EAGAIN;
  662. goto out;
  663. }
  664. cmb = cmb_data->last_block;
  665. time = cmb_data->last_update - cdev->private->cmb_start_time;
  666. memset(data, 0, sizeof(struct cmbdata));
  667. /* we only know values before device_busy_time */
  668. data->size = offsetof(struct cmbdata, device_busy_time);
  669. /* convert to nanoseconds */
  670. data->elapsed_time = (time * 1000) >> 12;
  671. /* copy data to new structure */
  672. data->ssch_rsch_count = cmb->ssch_rsch_count;
  673. data->sample_count = cmb->sample_count;
  674. /* time fields are converted to nanoseconds while copying */
  675. data->device_connect_time = time_to_nsec(cmb->device_connect_time);
  676. data->function_pending_time = time_to_nsec(cmb->function_pending_time);
  677. data->device_disconnect_time =
  678. time_to_nsec(cmb->device_disconnect_time);
  679. data->control_unit_queuing_time
  680. = time_to_nsec(cmb->control_unit_queuing_time);
  681. data->device_active_only_time
  682. = time_to_nsec(cmb->device_active_only_time);
  683. ret = 0;
  684. out:
  685. spin_unlock_irqrestore(cdev->ccwlock, flags);
  686. return ret;
  687. }
  688. static void reset_cmb(struct ccw_device *cdev)
  689. {
  690. cmf_generic_reset(cdev);
  691. }
  692. static void * align_cmb(void *area)
  693. {
  694. return area;
  695. }
  696. static struct attribute_group cmf_attr_group;
  697. static struct cmb_operations cmbops_basic = {
  698. .alloc = alloc_cmb,
  699. .free = free_cmb,
  700. .set = set_cmb,
  701. .read = read_cmb,
  702. .readall = readall_cmb,
  703. .reset = reset_cmb,
  704. .align = align_cmb,
  705. .attr_group = &cmf_attr_group,
  706. };
  707. /* ******** extended cmb handling ********/
  708. /**
  709. * struct cmbe - extended channel measurement block
  710. *
  711. * cmb as used by the hardware, may be in any 64 bit physical location,
  712. * the fields are described in z/Architecture Principles of Operation,
  713. * third edition, chapter 17.
  714. */
  715. struct cmbe {
  716. u32 ssch_rsch_count;
  717. u32 sample_count;
  718. u32 device_connect_time;
  719. u32 function_pending_time;
  720. u32 device_disconnect_time;
  721. u32 control_unit_queuing_time;
  722. u32 device_active_only_time;
  723. u32 device_busy_time;
  724. u32 initial_command_response_time;
  725. u32 reserved[7];
  726. };
  727. /*
  728. * kmalloc only guarantees 8 byte alignment, but we need cmbe
  729. * pointers to be naturally aligned. Make sure to allocate
  730. * enough space for two cmbes.
  731. */
  732. static inline struct cmbe *cmbe_align(struct cmbe *c)
  733. {
  734. unsigned long addr;
  735. addr = ((unsigned long)c + sizeof (struct cmbe) - sizeof(long)) &
  736. ~(sizeof (struct cmbe) - sizeof(long));
  737. return (struct cmbe*)addr;
  738. }
  739. static int alloc_cmbe(struct ccw_device *cdev)
  740. {
  741. struct cmbe *cmbe;
  742. struct cmb_data *cmb_data;
  743. int ret;
  744. cmbe = kzalloc (sizeof (*cmbe) * 2, GFP_KERNEL);
  745. if (!cmbe)
  746. return -ENOMEM;
  747. cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL);
  748. if (!cmb_data) {
  749. ret = -ENOMEM;
  750. goto out_free;
  751. }
  752. cmb_data->last_block = kzalloc(sizeof(struct cmbe), GFP_KERNEL);
  753. if (!cmb_data->last_block) {
  754. ret = -ENOMEM;
  755. goto out_free;
  756. }
  757. cmb_data->size = sizeof(struct cmbe);
  758. spin_lock_irq(cdev->ccwlock);
  759. if (cdev->private->cmb) {
  760. spin_unlock_irq(cdev->ccwlock);
  761. ret = -EBUSY;
  762. goto out_free;
  763. }
  764. cmb_data->hw_block = cmbe;
  765. cdev->private->cmb = cmb_data;
  766. spin_unlock_irq(cdev->ccwlock);
  767. /* activate global measurement if this is the first channel */
  768. spin_lock(&cmb_area.lock);
  769. if (list_empty(&cmb_area.list))
  770. cmf_activate(NULL, 1);
  771. list_add_tail(&cdev->private->cmb_list, &cmb_area.list);
  772. spin_unlock(&cmb_area.lock);
  773. return 0;
  774. out_free:
  775. if (cmb_data)
  776. kfree(cmb_data->last_block);
  777. kfree(cmb_data);
  778. kfree(cmbe);
  779. return ret;
  780. }
  781. static void free_cmbe(struct ccw_device *cdev)
  782. {
  783. struct cmb_data *cmb_data;
  784. spin_lock_irq(cdev->ccwlock);
  785. cmb_data = cdev->private->cmb;
  786. cdev->private->cmb = NULL;
  787. if (cmb_data)
  788. kfree(cmb_data->last_block);
  789. kfree(cmb_data);
  790. spin_unlock_irq(cdev->ccwlock);
  791. /* deactivate global measurement if this is the last channel */
  792. spin_lock(&cmb_area.lock);
  793. list_del_init(&cdev->private->cmb_list);
  794. if (list_empty(&cmb_area.list))
  795. cmf_activate(NULL, 0);
  796. spin_unlock(&cmb_area.lock);
  797. }
  798. static int set_cmbe(struct ccw_device *cdev, u32 mme)
  799. {
  800. unsigned long mba;
  801. struct cmb_data *cmb_data;
  802. unsigned long flags;
  803. spin_lock_irqsave(cdev->ccwlock, flags);
  804. if (!cdev->private->cmb) {
  805. spin_unlock_irqrestore(cdev->ccwlock, flags);
  806. return -EINVAL;
  807. }
  808. cmb_data = cdev->private->cmb;
  809. mba = mme ? (unsigned long) cmbe_align(cmb_data->hw_block) : 0;
  810. spin_unlock_irqrestore(cdev->ccwlock, flags);
  811. return set_schib_wait(cdev, mme, 1, mba);
  812. }
/*
 * Read one value from the extended measurement block of @cdev.
 *
 * The two counter indices are returned unmodified; all time indices are
 * converted to an average in nanoseconds per sample via
 * time_to_avg_nsec().  Returns 0 when no data is available or the index
 * is unknown.
 */
static u64 read_cmbe(struct ccw_device *cdev, int index)
{
	struct cmbe *cmb;
	struct cmb_data *cmb_data;
	u32 val;
	int ret;
	unsigned long flags;

	/* Make sure a current copy of the hardware block exists. */
	ret = cmf_cmb_copy_wait(cdev);
	if (ret < 0)
		return 0;

	spin_lock_irqsave(cdev->ccwlock, flags);
	cmb_data = cdev->private->cmb;
	if (!cmb_data) {
		/* Measurement was disabled concurrently. */
		ret = 0;
		goto out;
	}
	cmb = cmb_data->last_block;

	switch (index) {
	/* Raw counters: returned without conversion. */
	case cmb_ssch_rsch_count:
		ret = cmb->ssch_rsch_count;
		goto out;
	case cmb_sample_count:
		ret = cmb->sample_count;
		goto out;
	/* Time fields: fall through to the averaging conversion below. */
	case cmb_device_connect_time:
		val = cmb->device_connect_time;
		break;
	case cmb_function_pending_time:
		val = cmb->function_pending_time;
		break;
	case cmb_device_disconnect_time:
		val = cmb->device_disconnect_time;
		break;
	case cmb_control_unit_queuing_time:
		val = cmb->control_unit_queuing_time;
		break;
	case cmb_device_active_only_time:
		val = cmb->device_active_only_time;
		break;
	case cmb_device_busy_time:
		val = cmb->device_busy_time;
		break;
	case cmb_initial_command_response_time:
		val = cmb->initial_command_response_time;
		break;
	default:
		ret = 0;
		goto out;
	}
	/*
	 * NOTE(review): ret is int while the function returns u64; an
	 * average above INT_MAX would be truncated/sign-extended here —
	 * confirm expected value ranges.
	 */
	ret = time_to_avg_nsec(val, cmb->sample_count);
out:
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	return ret;
}
/*
 * Fill @data with all measurement values of @cdev from the last copied
 * extended measurement block.
 *
 * Returns 0 on success, -ENODEV when measurement is not enabled, and
 * -EAGAIN when no sample has been captured yet.
 */
static int readall_cmbe(struct ccw_device *cdev, struct cmbdata *data)
{
	struct cmbe *cmb;
	struct cmb_data *cmb_data;
	u64 time;
	unsigned long flags;
	int ret;

	/* Make sure a current copy of the hardware block exists. */
	ret = cmf_cmb_copy_wait(cdev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(cdev->ccwlock, flags);
	cmb_data = cdev->private->cmb;
	if (!cmb_data) {
		ret = -ENODEV;
		goto out;
	}
	if (cmb_data->last_update == 0) {
		/* No sample captured since measurement was enabled. */
		ret = -EAGAIN;
		goto out;
	}
	time = cmb_data->last_update - cdev->private->cmb_start_time;

	memset (data, 0, sizeof(struct cmbdata));

	/* we only know values before device_busy_time */
	data->size = offsetof(struct cmbdata, device_busy_time);

	/* convert to nanoseconds */
	data->elapsed_time = (time * 1000) >> 12;

	cmb = cmb_data->last_block;

	/* copy data to new structure */
	data->ssch_rsch_count = cmb->ssch_rsch_count;
	data->sample_count = cmb->sample_count;

	/* time fields are converted to nanoseconds while copying */
	data->device_connect_time = time_to_nsec(cmb->device_connect_time);
	data->function_pending_time = time_to_nsec(cmb->function_pending_time);
	data->device_disconnect_time =
		time_to_nsec(cmb->device_disconnect_time);
	data->control_unit_queuing_time
		= time_to_nsec(cmb->control_unit_queuing_time);
	data->device_active_only_time
		= time_to_nsec(cmb->device_active_only_time);
	data->device_busy_time = time_to_nsec(cmb->device_busy_time);
	data->initial_command_response_time
		= time_to_nsec(cmb->initial_command_response_time);

	ret = 0;
out:
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	return ret;
}
/* Reset callback: extended blocks use the generic reset implementation. */
static void reset_cmbe(struct ccw_device *cdev)
{
	cmf_generic_reset(cdev);
}
/* Align callback: thin wrapper around cmbe_align() for @area. */
static void * align_cmbe(void *area)
{
	return cmbe_align(area);
}
/* Defined below, after the extended attribute list it references. */
static struct attribute_group cmf_attr_group_ext;

/* Operations for devices using extended channel measurement blocks. */
static struct cmb_operations cmbops_extended = {
	.alloc = alloc_cmbe,
	.free = free_cmbe,
	.set = set_cmbe,
	.read = read_cmbe,
	.readall = readall_cmbe,
	.reset = reset_cmbe,
	.align = align_cmbe,
	.attr_group = &cmf_attr_group_ext,
};
  933. static ssize_t cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx)
  934. {
  935. return sprintf(buf, "%lld\n",
  936. (unsigned long long) cmf_read(to_ccwdev(dev), idx));
  937. }
  938. static ssize_t cmb_show_avg_sample_interval(struct device *dev,
  939. struct device_attribute *attr,
  940. char *buf)
  941. {
  942. struct ccw_device *cdev;
  943. long interval;
  944. unsigned long count;
  945. struct cmb_data *cmb_data;
  946. cdev = to_ccwdev(dev);
  947. count = cmf_read(cdev, cmb_sample_count);
  948. spin_lock_irq(cdev->ccwlock);
  949. cmb_data = cdev->private->cmb;
  950. if (count) {
  951. interval = cmb_data->last_update -
  952. cdev->private->cmb_start_time;
  953. interval = (interval * 1000) >> 12;
  954. interval /= count;
  955. } else
  956. interval = -1;
  957. spin_unlock_irq(cdev->ccwlock);
  958. return sprintf(buf, "%ld\n", interval);
  959. }
  960. static ssize_t cmb_show_avg_utilization(struct device *dev,
  961. struct device_attribute *attr,
  962. char *buf)
  963. {
  964. struct cmbdata data;
  965. u64 utilization;
  966. unsigned long t, u;
  967. int ret;
  968. ret = cmf_readall(to_ccwdev(dev), &data);
  969. if (ret == -EAGAIN || ret == -ENODEV)
  970. /* No data (yet/currently) available to use for calculation. */
  971. return sprintf(buf, "n/a\n");
  972. else if (ret)
  973. return ret;
  974. utilization = data.device_connect_time +
  975. data.function_pending_time +
  976. data.device_disconnect_time;
  977. /* shift to avoid long long division */
  978. while (-1ul < (data.elapsed_time | utilization)) {
  979. utilization >>= 8;
  980. data.elapsed_time >>= 8;
  981. }
  982. /* calculate value in 0.1 percent units */
  983. t = (unsigned long) data.elapsed_time / 1000;
  984. u = (unsigned long) utilization / t;
  985. return sprintf(buf, "%02ld.%01ld%%\n", u/ 10, u - (u/ 10) * 10);
  986. }
/*
 * Generate a read-only sysfs attribute <name> that reports a single
 * measurement value through cmb_show_attr().
 */
#define cmf_attr(name) \
static ssize_t show_##name(struct device *dev, \
			   struct device_attribute *attr, char *buf) \
{ return cmb_show_attr((dev), buf, cmb_##name); } \
static DEVICE_ATTR(name, 0444, show_##name, NULL);

/*
 * Same as cmf_attr(), but the sysfs file is named "avg_<name>" to mark
 * values that are averaged over the sample count.
 */
#define cmf_attr_avg(name) \
static ssize_t show_avg_##name(struct device *dev, \
			       struct device_attribute *attr, char *buf) \
{ return cmb_show_attr((dev), buf, cmb_##name); } \
static DEVICE_ATTR(avg_##name, 0444, show_avg_##name, NULL);

/* Raw counters. */
cmf_attr(ssch_rsch_count);
cmf_attr(sample_count);

/* Averaged time values. */
cmf_attr_avg(device_connect_time);
cmf_attr_avg(function_pending_time);
cmf_attr_avg(device_disconnect_time);
cmf_attr_avg(control_unit_queuing_time);
cmf_attr_avg(device_active_only_time);
cmf_attr_avg(device_busy_time);
cmf_attr_avg(initial_command_response_time);

/* Derived attributes with their own show functions. */
static DEVICE_ATTR(avg_sample_interval, 0444, cmb_show_avg_sample_interval,
		   NULL);
static DEVICE_ATTR(avg_utilization, 0444, cmb_show_avg_utilization, NULL);
/* Attributes available with basic measurement blocks. */
static struct attribute *cmf_attributes[] = {
	&dev_attr_avg_sample_interval.attr,
	&dev_attr_avg_utilization.attr,
	&dev_attr_ssch_rsch_count.attr,
	&dev_attr_sample_count.attr,
	&dev_attr_avg_device_connect_time.attr,
	&dev_attr_avg_function_pending_time.attr,
	&dev_attr_avg_device_disconnect_time.attr,
	&dev_attr_avg_control_unit_queuing_time.attr,
	&dev_attr_avg_device_active_only_time.attr,
	NULL,
};

/* Exposed as the per-device "cmf" sysfs directory (basic format). */
static struct attribute_group cmf_attr_group = {
	.name = "cmf",
	.attrs = cmf_attributes,
};
/*
 * Attributes available with extended measurement blocks; superset of
 * cmf_attributes[] adding busy and initial-command-response times.
 */
static struct attribute *cmf_attributes_ext[] = {
	&dev_attr_avg_sample_interval.attr,
	&dev_attr_avg_utilization.attr,
	&dev_attr_ssch_rsch_count.attr,
	&dev_attr_sample_count.attr,
	&dev_attr_avg_device_connect_time.attr,
	&dev_attr_avg_function_pending_time.attr,
	&dev_attr_avg_device_disconnect_time.attr,
	&dev_attr_avg_control_unit_queuing_time.attr,
	&dev_attr_avg_device_active_only_time.attr,
	&dev_attr_avg_device_busy_time.attr,
	&dev_attr_avg_initial_command_response_time.attr,
	NULL,
};

/* Exposed as the per-device "cmf" sysfs directory (extended format). */
static struct attribute_group cmf_attr_group_ext = {
	.name = "cmf",
	.attrs = cmf_attributes_ext,
};
  1043. static ssize_t cmb_enable_show(struct device *dev,
  1044. struct device_attribute *attr,
  1045. char *buf)
  1046. {
  1047. return sprintf(buf, "%d\n", to_ccwdev(dev)->private->cmb ? 1 : 0);
  1048. }
  1049. static ssize_t cmb_enable_store(struct device *dev,
  1050. struct device_attribute *attr, const char *buf,
  1051. size_t c)
  1052. {
  1053. struct ccw_device *cdev;
  1054. int ret;
  1055. cdev = to_ccwdev(dev);
  1056. switch (buf[0]) {
  1057. case '0':
  1058. ret = disable_cmf(cdev);
  1059. if (ret)
  1060. dev_info(&cdev->dev, "disable_cmf failed (%d)\n", ret);
  1061. break;
  1062. case '1':
  1063. ret = enable_cmf(cdev);
  1064. if (ret && ret != -EBUSY)
  1065. dev_info(&cdev->dev, "enable_cmf failed (%d)\n", ret);
  1066. break;
  1067. }
  1068. return c;
  1069. }
/* "cmb_enable": world-readable, root-writable measurement toggle. */
DEVICE_ATTR(cmb_enable, 0644, cmb_enable_show, cmb_enable_store);

/* enable_cmf/disable_cmf: module interface for cmf (de)activation */
  1072. int enable_cmf(struct ccw_device *cdev)
  1073. {
  1074. int ret;
  1075. ret = cmbops->alloc(cdev);
  1076. cmbops->reset(cdev);
  1077. if (ret)
  1078. return ret;
  1079. ret = cmbops->set(cdev, 2);
  1080. if (ret) {
  1081. cmbops->free(cdev);
  1082. return ret;
  1083. }
  1084. ret = sysfs_create_group(&cdev->dev.kobj, cmbops->attr_group);
  1085. if (!ret)
  1086. return 0;
  1087. cmbops->set(cdev, 0); //FIXME: this can fail
  1088. cmbops->free(cdev);
  1089. return ret;
  1090. }
  1091. int disable_cmf(struct ccw_device *cdev)
  1092. {
  1093. int ret;
  1094. ret = cmbops->set(cdev, 0);
  1095. if (ret)
  1096. return ret;
  1097. cmbops->free(cdev);
  1098. sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
  1099. return ret;
  1100. }
/* Read one measurement value of @cdev via the active cmb operations. */
u64 cmf_read(struct ccw_device *cdev, int index)
{
	return cmbops->read(cdev, index);
}
/* Read all measurement values of @cdev via the active cmb operations. */
int cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
{
	return cmbops->readall(cdev, data);
}
/* Reenable cmf when a disconnected device becomes available again. */
int cmf_reenable(struct ccw_device *cdev)
{
	/* Discard stale data, then switch measurement mode back on. */
	cmbops->reset(cdev);
	return cmbops->set(cdev, 2);
}
  1115. static int __init init_cmf(void)
  1116. {
  1117. char *format_string;
  1118. char *detect_string = "parameter";
  1119. /*
  1120. * If the user did not give a parameter, see if we are running on a
  1121. * machine supporting extended measurement blocks, otherwise fall back
  1122. * to basic mode.
  1123. */
  1124. if (format == CMF_AUTODETECT) {
  1125. if (!css_characteristics_avail ||
  1126. !css_general_characteristics.ext_mb) {
  1127. format = CMF_BASIC;
  1128. } else {
  1129. format = CMF_EXTENDED;
  1130. }
  1131. detect_string = "autodetected";
  1132. } else {
  1133. detect_string = "parameter";
  1134. }
  1135. switch (format) {
  1136. case CMF_BASIC:
  1137. format_string = "basic";
  1138. cmbops = &cmbops_basic;
  1139. break;
  1140. case CMF_EXTENDED:
  1141. format_string = "extended";
  1142. cmbops = &cmbops_extended;
  1143. break;
  1144. default:
  1145. printk(KERN_ERR "cio: Invalid format %d for channel "
  1146. "measurement facility\n", format);
  1147. return 1;
  1148. }
  1149. printk(KERN_INFO "cio: Channel measurement facility using %s "
  1150. "format (%s)\n", format_string, detect_string);
  1151. return 0;
  1152. }
module_init(init_cmf);

MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("channel measurement facility base driver\n"
		   "Copyright 2003 IBM Corporation\n");

/* Public interface for per-device measurement control and readout. */
EXPORT_SYMBOL_GPL(enable_cmf);
EXPORT_SYMBOL_GPL(disable_cmf);
EXPORT_SYMBOL_GPL(cmf_read);
EXPORT_SYMBOL_GPL(cmf_readall);