/*
 * linux/drivers/s390/cio/cmf.c
 *
 * Linux on zSeries Channel Measurement Facility support
 *
 * Copyright 2000,2003 IBM Corporation
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/bootmem.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/timex.h>	/* get_clock() */

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/cmb.h>
#include <asm/div64.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "chsc.h"
/* parameter to enable cmf during boot, possible uses are:
 *  "s390cmf" -- enable cmf and allocate 2 MB of ram so measuring can be
 *               used on any subchannel
 *  "s390cmf=<num>" -- enable cmf and allocate enough memory to measure
 *                     <num> subchannels, where <num> is an integer
 *                     between 1 and 65535, default is 1024
 */
#define ARGSTRING "s390cmf"
/* indices for READCMB */
enum cmb_index {
	/* basic and extended format: */
	cmb_ssch_rsch_count,
	cmb_sample_count,
	cmb_device_connect_time,
	cmb_function_pending_time,
	cmb_device_disconnect_time,
	cmb_control_unit_queuing_time,
	cmb_device_active_only_time,
	/* extended format only: */
	cmb_device_busy_time,
	cmb_initial_command_response_time,
};
/**
 * enum cmb_format - types of supported measurement block formats
 *
 * @CMF_BASIC:      traditional channel measurement blocks supported
 *                  by all machines that we run on
 * @CMF_EXTENDED:   improved format that was introduced with the z990
 *                  machine
 * @CMF_AUTODETECT: default: use extended format when running on a z990
 *                  or later machine, otherwise fall back to basic format
 **/
enum cmb_format {
	CMF_BASIC,
	CMF_EXTENDED,
	CMF_AUTODETECT = -1,
};

/**
 * format - actual format for all measurement blocks
 *
 * The format module parameter can be set to a value of 0 (zero)
 * or 1, indicating basic or extended format as described for
 * enum cmb_format.
 */
static int format = CMF_AUTODETECT;
module_param(format, bool, 0444);
/**
 * struct cmb_operations - functions to use depending on cmb_format
 *
 * all these functions operate on a struct cmf_device. There is only
 * one instance of struct cmb_operations because all cmf_device
 * objects are guaranteed to be of the same type.
 *
 * @alloc:	allocate memory for a channel measurement block,
 *		either with the help of a special pool or with kmalloc
 * @free:	free memory allocated with @alloc
 * @set:	enable or disable measurement
 * @readall:	read a measurement block in a common format
 * @reset:	clear the data in the associated measurement block and
 *		reset its time stamp
 */
struct cmb_operations {
	int (*alloc)  (struct ccw_device*);
	void (*free)  (struct ccw_device*);
	int (*set)    (struct ccw_device*, u32);
	u64 (*read)   (struct ccw_device*, int);
	int (*readall)(struct ccw_device*, struct cmbdata *);
	void (*reset) (struct ccw_device*);

	struct attribute_group *attr_group;
};
static struct cmb_operations *cmbops;
/* our user interface is designed in terms of nanoseconds,
 * while the hardware measures total times in its own
 * unit. */
static inline u64 time_to_nsec(u32 value)
{
	return ((u64)value) * 128000ull;
}

/*
 * Users are usually interested in average times,
 * not accumulated time.
 * This also helps us with atomicity problems
 * when reading single values.
 */
static inline u64 time_to_avg_nsec(u32 value, u32 count)
{
	u64 ret;

	/* no samples yet, avoid division by 0 */
	if (count == 0)
		return 0;

	/* value comes in units of 128 usec */
	ret = time_to_nsec(value);
	do_div(ret, count);

	return ret;
}
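/*
 * Worked example for the conversions above (explanatory note, not part of
 * the original driver): a raw hardware value of 100, counted in units of
 * 128 microseconds, gives time_to_nsec(100) == 12,800,000 ns, i.e. 12.8 ms
 * of accumulated time; with a sample_count of 4, time_to_avg_nsec(100, 4)
 * returns 3,200,000 ns, i.e. an average of 3.2 ms per sample.
 */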
/* activate or deactivate the channel monitor. When area is NULL,
 * the monitor is deactivated. The channel monitor needs to
 * be active in order to measure subchannels, which also need
 * to be enabled. */
static inline void
cmf_activate(void *area, unsigned int onoff)
{
	register void * __gpr2 asm("2");
	register long __gpr1 asm("1");

	__gpr2 = area;
	__gpr1 = onoff ? 2 : 0;
	/* activate channel measurement */
	asm("schm" : : "d" (__gpr2), "d" (__gpr1) );
}

static int
set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address)
{
	int ret;
	int retry;
	struct subchannel *sch;
	struct schib *schib;

	sch = to_subchannel(cdev->dev.parent);
	schib = &sch->schib;
	/* msch can silently fail, so do it again if necessary */
	for (retry = 0; retry < 3; retry++) {
		/* prepare schib */
		stsch(sch->schid, schib);
		schib->pmcw.mme  = mme;
		schib->pmcw.mbfc = mbfc;
		/* address can be either a block address or a block index */
		if (mbfc)
			schib->mba = address;
		else
			schib->pmcw.mbi = address;

		/* try to submit it */
		switch (ret = msch_err(sch->schid, schib)) {
		case 0:
			break;
		case 1:
		case 2: /* in I/O or status pending */
			ret = -EBUSY;
			break;
		case 3: /* subchannel is no longer valid */
			ret = -ENODEV;
			break;
		default: /* msch caught an exception */
			ret = -EINVAL;
			break;
		}
		stsch(sch->schid, schib); /* restore the schib */

		if (ret)
			break;

		/* check if it worked */
		if (schib->pmcw.mme  == mme &&
		    schib->pmcw.mbfc == mbfc &&
		    (mbfc ? (schib->mba == address)
			  : (schib->pmcw.mbi == address)))
			return 0;

		ret = -EINVAL;
	}

	return ret;
}
struct set_schib_struct {
	u32 mme;
	int mbfc;
	unsigned long address;
	wait_queue_head_t wait;
	int ret;
};

static int set_schib_wait(struct ccw_device *cdev, u32 mme,
			  int mbfc, unsigned long address)
{
	struct set_schib_struct s = {
		.mme = mme,
		.mbfc = mbfc,
		.address = address,
		.wait = __WAIT_QUEUE_HEAD_INITIALIZER(s.wait),
	};

	spin_lock_irq(cdev->ccwlock);
	s.ret = set_schib(cdev, mme, mbfc, address);
	if (s.ret != -EBUSY) {
		goto out_nowait;
	}

	if (cdev->private->state != DEV_STATE_ONLINE) {
		s.ret = -EBUSY;
		/* if the device is not online, don't even try again */
		goto out_nowait;
	}
	cdev->private->state = DEV_STATE_CMFCHANGE;
	cdev->private->cmb_wait = &s;
	s.ret = 1;

	spin_unlock_irq(cdev->ccwlock);
	if (wait_event_interruptible(s.wait, s.ret != 1)) {
		spin_lock_irq(cdev->ccwlock);
		if (s.ret == 1) {
			s.ret = -ERESTARTSYS;
			cdev->private->cmb_wait = NULL;
			if (cdev->private->state == DEV_STATE_CMFCHANGE)
				cdev->private->state = DEV_STATE_ONLINE;
		}
		spin_unlock_irq(cdev->ccwlock);
	}
	return s.ret;

out_nowait:
	spin_unlock_irq(cdev->ccwlock);
	return s.ret;
}

void retry_set_schib(struct ccw_device *cdev)
{
	struct set_schib_struct *s;

	s = cdev->private->cmb_wait;
	cdev->private->cmb_wait = NULL;
	if (!s) {
		WARN_ON(1);
		return;
	}
	s->ret = set_schib(cdev, s->mme, s->mbfc, s->address);
	wake_up(&s->wait);
}
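/*
 * Note on the -EBUSY handshake above (explanatory comment, not original
 * code): when msch fails because the subchannel is busy, set_schib_wait()
 * parks a pointer to its on-stack request in cdev->private->cmb_wait,
 * switches the device to DEV_STATE_CMFCHANGE and sleeps.  retry_set_schib()
 * is then expected to be invoked by the ccw device state machine, presumably
 * once the outstanding I/O has completed, to repeat the msch and wake the
 * sleeper.
 */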
/**
 * struct cmb_area - container for global cmb data
 *
 * @mem:	pointer to CMBs (only in basic measurement mode)
 * @list:	contains a linked list of all subchannels
 * @lock:	protect concurrent access to @mem and @list
 */
struct cmb_area {
	struct cmb *mem;
	struct list_head list;
	int num_channels;
	spinlock_t lock;
};

static struct cmb_area cmb_area = {
	.lock = SPIN_LOCK_UNLOCKED,
	.list = LIST_HEAD_INIT(cmb_area.list),
	.num_channels = 1024,
};
/* ****** old style CMB handling ********/

/** int maxchannels
 *
 * Basic channel measurement blocks are allocated in one contiguous
 * block of memory, which can not be moved as long as any channel
 * is active. Therefore, a maximum number of subchannels needs to
 * be defined somewhere. This is a module parameter, defaulting to
 * a reasonable value of 1024, or 32 KB of memory.
 * Current kernels don't allow kmalloc with more than 128 KB, so the
 * maximum is 4096.
 */
module_param_named(maxchannels, cmb_area.num_channels, uint, 0444);

/**
 * struct cmb - basic channel measurement block
 *
 * cmb as used by the hardware; the fields are described in z/Architecture
 * Principles of Operation, chapter 17.
 * The area needs to be a contiguous array and may not be reallocated or
 * freed.
 * Only one cmb area can be present in the system.
 */
struct cmb {
	u16 ssch_rsch_count;
	u16 sample_count;
	u32 device_connect_time;
	u32 function_pending_time;
	u32 device_disconnect_time;
	u32 control_unit_queuing_time;
	u32 device_active_only_time;
	u32 reserved[2];
};
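/*
 * Size check (explanatory note, not original code): struct cmb is
 * 2 * u16 + 7 * u32 = 32 bytes, so the default maxchannels of 1024 blocks
 * matches the "32 KB of memory" figure quoted above, and 4096 blocks stay
 * within the 128 KB allocation limit mentioned there.
 */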
/* insert a single device into the cmb_area list
 * called with cmb_area.lock held from alloc_cmb
 */
static inline int
alloc_cmb_single (struct ccw_device *cdev)
{
	struct cmb *cmb;
	struct ccw_device_private *node;
	int ret;

	spin_lock_irq(cdev->ccwlock);
	if (!list_empty(&cdev->private->cmb_list)) {
		ret = -EBUSY;
		goto out;
	}

	/* find first unused cmb in cmb_area.mem.
	 * this is a little tricky: cmb_area.list
	 * remains sorted by ->cmb pointers */
	cmb = cmb_area.mem;
	list_for_each_entry(node, &cmb_area.list, cmb_list) {
		if ((struct cmb*)node->cmb > cmb)
			break;
		cmb++;
	}
	if (cmb - cmb_area.mem >= cmb_area.num_channels) {
		ret = -ENOMEM;
		goto out;
	}

	/* insert new cmb */
	list_add_tail(&cdev->private->cmb_list, &node->cmb_list);
	cdev->private->cmb = cmb;
	ret = 0;
out:
	spin_unlock_irq(cdev->ccwlock);
	return ret;
}
static int
alloc_cmb (struct ccw_device *cdev)
{
	int ret;
	struct cmb *mem;
	ssize_t size;

	spin_lock(&cmb_area.lock);

	if (!cmb_area.mem) {
		/* there is no user yet, so we need a new area */
		size = sizeof(struct cmb) * cmb_area.num_channels;
		WARN_ON(!list_empty(&cmb_area.list));

		spin_unlock(&cmb_area.lock);
		mem = (void*)__get_free_pages(GFP_KERNEL | GFP_DMA,
				 get_order(size));
		spin_lock(&cmb_area.lock);

		if (cmb_area.mem) {
			/* ok, another thread was faster */
			free_pages((unsigned long)mem, get_order(size));
		} else if (!mem) {
			/* no luck */
			ret = -ENOMEM;
			goto out;
		} else {
			/* everything ok */
			memset(mem, 0, size);
			cmb_area.mem = mem;
			cmf_activate(cmb_area.mem, 1);
		}
	}

	/* do the actual allocation */
	ret = alloc_cmb_single(cdev);
out:
	spin_unlock(&cmb_area.lock);

	return ret;
}
static void
free_cmb(struct ccw_device *cdev)
{
	struct ccw_device_private *priv;

	priv = cdev->private;

	spin_lock(&cmb_area.lock);
	spin_lock_irq(cdev->ccwlock);

	if (list_empty(&priv->cmb_list)) {
		/* already freed */
		goto out;
	}

	priv->cmb = NULL;
	list_del_init(&priv->cmb_list);

	if (list_empty(&cmb_area.list)) {
		ssize_t size;
		size = sizeof(struct cmb) * cmb_area.num_channels;
		cmf_activate(NULL, 0);
		free_pages((unsigned long)cmb_area.mem, get_order(size));
		cmb_area.mem = NULL;
	}
out:
	spin_unlock_irq(cdev->ccwlock);
	spin_unlock(&cmb_area.lock);
}

static int
set_cmb(struct ccw_device *cdev, u32 mme)
{
	u16 offset;

	if (!cdev->private->cmb)
		return -EINVAL;

	offset = mme ? (struct cmb *)cdev->private->cmb - cmb_area.mem : 0;

	return set_schib_wait(cdev, mme, 0, offset);
}
static u64
read_cmb (struct ccw_device *cdev, int index)
{
	/* yes, we have to put it on the stack
	 * because the cmb must only be accessed
	 * atomically, e.g. with mvc */
	struct cmb cmb;
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(cdev->ccwlock, flags);
	if (!cdev->private->cmb) {
		spin_unlock_irqrestore(cdev->ccwlock, flags);
		return 0;
	}

	cmb = *(struct cmb*)cdev->private->cmb;
	spin_unlock_irqrestore(cdev->ccwlock, flags);

	switch (index) {
	case cmb_ssch_rsch_count:
		return cmb.ssch_rsch_count;
	case cmb_sample_count:
		return cmb.sample_count;
	case cmb_device_connect_time:
		val = cmb.device_connect_time;
		break;
	case cmb_function_pending_time:
		val = cmb.function_pending_time;
		break;
	case cmb_device_disconnect_time:
		val = cmb.device_disconnect_time;
		break;
	case cmb_control_unit_queuing_time:
		val = cmb.control_unit_queuing_time;
		break;
	case cmb_device_active_only_time:
		val = cmb.device_active_only_time;
		break;
	default:
		return 0;
	}
	return time_to_avg_nsec(val, cmb.sample_count);
}
static int
readall_cmb (struct ccw_device *cdev, struct cmbdata *data)
{
	/* yes, we have to put it on the stack
	 * because the cmb must only be accessed
	 * atomically, e.g. with mvc */
	struct cmb cmb;
	unsigned long flags;
	u64 time;

	spin_lock_irqsave(cdev->ccwlock, flags);
	if (!cdev->private->cmb) {
		spin_unlock_irqrestore(cdev->ccwlock, flags);
		return -ENODEV;
	}

	cmb = *(struct cmb*)cdev->private->cmb;
	time = get_clock() - cdev->private->cmb_start_time;
	spin_unlock_irqrestore(cdev->ccwlock, flags);

	memset(data, 0, sizeof(struct cmbdata));

	/* we only know values before device_busy_time */
	data->size = offsetof(struct cmbdata, device_busy_time);

	/* convert the TOD clock difference to nanoseconds:
	 * bit 51 of the TOD clock is one microsecond, so
	 * (time * 1000) >> 12 gives nanoseconds */
	data->elapsed_time = (time * 1000) >> 12;

	/* copy data to new structure */
	data->ssch_rsch_count = cmb.ssch_rsch_count;
	data->sample_count = cmb.sample_count;

	/* time fields are converted to nanoseconds while copying */
	data->device_connect_time = time_to_nsec(cmb.device_connect_time);
	data->function_pending_time = time_to_nsec(cmb.function_pending_time);
	data->device_disconnect_time = time_to_nsec(cmb.device_disconnect_time);
	data->control_unit_queuing_time
		= time_to_nsec(cmb.control_unit_queuing_time);
	data->device_active_only_time
		= time_to_nsec(cmb.device_active_only_time);

	return 0;
}
static void
reset_cmb(struct ccw_device *cdev)
{
	struct cmb *cmb;
	spin_lock_irq(cdev->ccwlock);
	cmb = cdev->private->cmb;
	if (cmb)
		memset(cmb, 0, sizeof(*cmb));
	cdev->private->cmb_start_time = get_clock();
	spin_unlock_irq(cdev->ccwlock);
}

static struct attribute_group cmf_attr_group;

static struct cmb_operations cmbops_basic = {
	.alloc	    = alloc_cmb,
	.free	    = free_cmb,
	.set	    = set_cmb,
	.read	    = read_cmb,
	.readall    = readall_cmb,
	.reset	    = reset_cmb,
	.attr_group = &cmf_attr_group,
};
/* ******** extended cmb handling ********/

/**
 * struct cmbe - extended channel measurement block
 *
 * cmb as used by the hardware, may be in any 64 bit physical location,
 * the fields are described in z/Architecture Principles of Operation,
 * third edition, chapter 17.
 */
struct cmbe {
	u32 ssch_rsch_count;
	u32 sample_count;
	u32 device_connect_time;
	u32 function_pending_time;
	u32 device_disconnect_time;
	u32 control_unit_queuing_time;
	u32 device_active_only_time;
	u32 device_busy_time;
	u32 initial_command_response_time;
	u32 reserved[7];
};

/* kmalloc only guarantees 8 byte alignment, but we need cmbe
 * pointers to be naturally aligned. Make sure to allocate
 * enough space for two cmbes */
static inline struct cmbe* cmbe_align(struct cmbe *c)
{
	unsigned long addr;
	addr = ((unsigned long)c + sizeof (struct cmbe) - sizeof(long)) &
		~(sizeof (struct cmbe) - sizeof(long));
	return (struct cmbe*)addr;
}
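/*
 * Worked example for cmbe_align() (explanatory note, not original code,
 * assuming a 64-bit build where sizeof(long) == 8): sizeof(struct cmbe) is
 * 16 * 4 = 64 bytes, so "naturally aligned" means 64-byte aligned.  The
 * expression adds 56 and clears bits 3-5 of the address; e.g. a kmalloc
 * result ending in ...1010 (hex) becomes ...1010 + 0x38 = ...1048, masked to
 * ...1040, which is 64-byte aligned and still lies within the doubled
 * allocation made by alloc_cmbe() below.
 */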
static int
alloc_cmbe (struct ccw_device *cdev)
{
	struct cmbe *cmbe;
	cmbe = kmalloc(sizeof(*cmbe) * 2, GFP_KERNEL);
	if (!cmbe)
		return -ENOMEM;

	spin_lock_irq(cdev->ccwlock);
	if (cdev->private->cmb) {
		kfree(cmbe);
		spin_unlock_irq(cdev->ccwlock);
		return -EBUSY;
	}

	cdev->private->cmb = cmbe;
	spin_unlock_irq(cdev->ccwlock);

	/* activate global measurement if this is the first channel */
	spin_lock(&cmb_area.lock);
	if (list_empty(&cmb_area.list))
		cmf_activate(NULL, 1);
	list_add_tail(&cdev->private->cmb_list, &cmb_area.list);
	spin_unlock(&cmb_area.lock);

	return 0;
}

static void
free_cmbe (struct ccw_device *cdev)
{
	spin_lock_irq(cdev->ccwlock);
	kfree(cdev->private->cmb);
	cdev->private->cmb = NULL;
	spin_unlock_irq(cdev->ccwlock);

	/* deactivate global measurement if this is the last channel */
	spin_lock(&cmb_area.lock);
	list_del_init(&cdev->private->cmb_list);
	if (list_empty(&cmb_area.list))
		cmf_activate(NULL, 0);
	spin_unlock(&cmb_area.lock);
}

static int
set_cmbe(struct ccw_device *cdev, u32 mme)
{
	unsigned long mba;

	if (!cdev->private->cmb)
		return -EINVAL;
	mba = mme ? (unsigned long) cmbe_align(cdev->private->cmb) : 0;

	return set_schib_wait(cdev, mme, 1, mba);
}
u64
read_cmbe (struct ccw_device *cdev, int index)
{
	/* yes, we have to put it on the stack
	 * because the cmb must only be accessed
	 * atomically, e.g. with mvc */
	struct cmbe cmb;
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(cdev->ccwlock, flags);
	if (!cdev->private->cmb) {
		spin_unlock_irqrestore(cdev->ccwlock, flags);
		return 0;
	}

	cmb = *cmbe_align(cdev->private->cmb);
	spin_unlock_irqrestore(cdev->ccwlock, flags);

	switch (index) {
	case cmb_ssch_rsch_count:
		return cmb.ssch_rsch_count;
	case cmb_sample_count:
		return cmb.sample_count;
	case cmb_device_connect_time:
		val = cmb.device_connect_time;
		break;
	case cmb_function_pending_time:
		val = cmb.function_pending_time;
		break;
	case cmb_device_disconnect_time:
		val = cmb.device_disconnect_time;
		break;
	case cmb_control_unit_queuing_time:
		val = cmb.control_unit_queuing_time;
		break;
	case cmb_device_active_only_time:
		val = cmb.device_active_only_time;
		break;
	case cmb_device_busy_time:
		val = cmb.device_busy_time;
		break;
	case cmb_initial_command_response_time:
		val = cmb.initial_command_response_time;
		break;
	default:
		return 0;
	}
	return time_to_avg_nsec(val, cmb.sample_count);
}
static int
readall_cmbe (struct ccw_device *cdev, struct cmbdata *data)
{
	/* yes, we have to put it on the stack
	 * because the cmb must only be accessed
	 * atomically, e.g. with mvc */
	struct cmbe cmb;
	unsigned long flags;
	u64 time;

	spin_lock_irqsave(cdev->ccwlock, flags);
	if (!cdev->private->cmb) {
		spin_unlock_irqrestore(cdev->ccwlock, flags);
		return -ENODEV;
	}

	cmb = *cmbe_align(cdev->private->cmb);
	time = get_clock() - cdev->private->cmb_start_time;
	spin_unlock_irqrestore(cdev->ccwlock, flags);

	memset(data, 0, sizeof(struct cmbdata));

	/* we only know values before device_busy_time */
	data->size = offsetof(struct cmbdata, device_busy_time);

	/* convert to nanoseconds */
	data->elapsed_time = (time * 1000) >> 12;

	/* copy data to new structure */
	data->ssch_rsch_count = cmb.ssch_rsch_count;
	data->sample_count = cmb.sample_count;

	/* time fields are converted to nanoseconds while copying */
	data->device_connect_time = time_to_nsec(cmb.device_connect_time);
	data->function_pending_time = time_to_nsec(cmb.function_pending_time);
	data->device_disconnect_time = time_to_nsec(cmb.device_disconnect_time);
	data->control_unit_queuing_time
		= time_to_nsec(cmb.control_unit_queuing_time);
	data->device_active_only_time
		= time_to_nsec(cmb.device_active_only_time);
	data->device_busy_time = time_to_nsec(cmb.device_busy_time);
	data->initial_command_response_time
		= time_to_nsec(cmb.initial_command_response_time);

	return 0;
}
static void
reset_cmbe(struct ccw_device *cdev)
{
	struct cmbe *cmb;
	spin_lock_irq(cdev->ccwlock);
	cmb = cmbe_align(cdev->private->cmb);
	if (cmb)
		memset(cmb, 0, sizeof(*cmb));
	cdev->private->cmb_start_time = get_clock();
	spin_unlock_irq(cdev->ccwlock);
}

static struct attribute_group cmf_attr_group_ext;

static struct cmb_operations cmbops_extended = {
	.alloc	    = alloc_cmbe,
	.free	    = free_cmbe,
	.set	    = set_cmbe,
	.read	    = read_cmbe,
	.readall    = readall_cmbe,
	.reset	    = reset_cmbe,
	.attr_group = &cmf_attr_group_ext,
};
static ssize_t
cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx)
{
	return sprintf(buf, "%lld\n",
		(unsigned long long) cmf_read(to_ccwdev(dev), idx));
}

static ssize_t
cmb_show_avg_sample_interval(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev;
	long interval;
	unsigned long count;

	cdev = to_ccwdev(dev);
	interval = get_clock() - cdev->private->cmb_start_time;
	count = cmf_read(cdev, cmb_sample_count);
	if (count)
		interval /= count;
	else
		interval = -1;
	return sprintf(buf, "%ld\n", interval);
}

static ssize_t
cmb_show_avg_utilization(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct cmbdata data;
	u64 utilization;
	unsigned long t, u;
	int ret;

	ret = cmf_readall(to_ccwdev(dev), &data);
	if (ret)
		return ret;

	utilization = data.device_connect_time +
		      data.function_pending_time +
		      data.device_disconnect_time;

	/* shift to avoid long long division */
	while (-1ul < (data.elapsed_time | utilization)) {
		utilization >>= 8;
		data.elapsed_time >>= 8;
	}

	/* calculate value in 0.1 percent units */
	t = (unsigned long) data.elapsed_time / 1000;
	u = (unsigned long) utilization / t;

	return sprintf(buf, "%02ld.%01ld%%\n", u / 10, u - (u / 10) * 10);
}
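/*
 * Arithmetic behind avg_utilization (explanatory note, not original code):
 * utilization and elapsed_time are both in nanoseconds, so
 * u = utilization / (elapsed_time / 1000) is the utilization in units of
 * 0.1 percent; u / 10 and the remainder then print it as a percentage with
 * one decimal place, e.g. u == 257 is shown as "25.7%".
 */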
#define cmf_attr(name) \
static ssize_t show_ ## name (struct device * dev, struct device_attribute *attr, char * buf) \
{ return cmb_show_attr((dev), buf, cmb_ ## name); } \
static DEVICE_ATTR(name, 0444, show_ ## name, NULL);

#define cmf_attr_avg(name) \
static ssize_t show_avg_ ## name (struct device * dev, struct device_attribute *attr, char * buf) \
{ return cmb_show_attr((dev), buf, cmb_ ## name); } \
static DEVICE_ATTR(avg_ ## name, 0444, show_avg_ ## name, NULL);

cmf_attr(ssch_rsch_count);
cmf_attr(sample_count);
cmf_attr_avg(device_connect_time);
cmf_attr_avg(function_pending_time);
cmf_attr_avg(device_disconnect_time);
cmf_attr_avg(control_unit_queuing_time);
cmf_attr_avg(device_active_only_time);
cmf_attr_avg(device_busy_time);
cmf_attr_avg(initial_command_response_time);

static DEVICE_ATTR(avg_sample_interval, 0444, cmb_show_avg_sample_interval, NULL);
static DEVICE_ATTR(avg_utilization, 0444, cmb_show_avg_utilization, NULL);

static struct attribute *cmf_attributes[] = {
	&dev_attr_avg_sample_interval.attr,
	&dev_attr_avg_utilization.attr,
	&dev_attr_ssch_rsch_count.attr,
	&dev_attr_sample_count.attr,
	&dev_attr_avg_device_connect_time.attr,
	&dev_attr_avg_function_pending_time.attr,
	&dev_attr_avg_device_disconnect_time.attr,
	&dev_attr_avg_control_unit_queuing_time.attr,
	&dev_attr_avg_device_active_only_time.attr,
	NULL,
};

static struct attribute_group cmf_attr_group = {
	.name  = "cmf",
	.attrs = cmf_attributes,
};

static struct attribute *cmf_attributes_ext[] = {
	&dev_attr_avg_sample_interval.attr,
	&dev_attr_avg_utilization.attr,
	&dev_attr_ssch_rsch_count.attr,
	&dev_attr_sample_count.attr,
	&dev_attr_avg_device_connect_time.attr,
	&dev_attr_avg_function_pending_time.attr,
	&dev_attr_avg_device_disconnect_time.attr,
	&dev_attr_avg_control_unit_queuing_time.attr,
	&dev_attr_avg_device_active_only_time.attr,
	&dev_attr_avg_device_busy_time.attr,
	&dev_attr_avg_initial_command_response_time.attr,
	NULL,
};

static struct attribute_group cmf_attr_group_ext = {
	.name  = "cmf",
	.attrs = cmf_attributes_ext,
};
static ssize_t cmb_enable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_ccwdev(dev)->private->cmb ? 1 : 0);
}

static ssize_t cmb_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t c)
{
	struct ccw_device *cdev;
	int ret;

	cdev = to_ccwdev(dev);

	switch (buf[0]) {
	case '0':
		ret = disable_cmf(cdev);
		if (ret)
			printk(KERN_INFO "disable_cmf failed (%d)\n", ret);
		break;
	case '1':
		ret = enable_cmf(cdev);
		if (ret && ret != -EBUSY)
			printk(KERN_INFO "enable_cmf failed (%d)\n", ret);
		break;
	}

	return c;
}

DEVICE_ATTR(cmb_enable, 0644, cmb_enable_show, cmb_enable_store);
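/*
 * Usage note (explanatory comment, not original code): the cmb_enable
 * attribute is attached to each ccw device node, so measurement can
 * typically be switched on from userspace with something like
 *   echo 1 > /sys/bus/ccw/devices/0.0.1234/cmb_enable
 * after which the counters defined above appear in the per-device "cmf"
 * attribute group, e.g. .../0.0.1234/cmf/avg_utilization.  The bus-ID
 * 0.0.1234 is only an example.
 */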
/* enable_cmf/disable_cmf: module interface for cmf (de)activation */
int
enable_cmf(struct ccw_device *cdev)
{
	int ret;

	ret = cmbops->alloc(cdev);
	cmbops->reset(cdev);
	if (ret)
		return ret;
	ret = cmbops->set(cdev, 2);
	if (ret) {
		cmbops->free(cdev);
		return ret;
	}
	ret = sysfs_create_group(&cdev->dev.kobj, cmbops->attr_group);
	if (!ret)
		return 0;
	cmbops->set(cdev, 0);  /* FIXME: this can fail */
	cmbops->free(cdev);
	return ret;
}

int
disable_cmf(struct ccw_device *cdev)
{
	int ret;

	ret = cmbops->set(cdev, 0);
	if (ret)
		return ret;
	cmbops->free(cdev);
	sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
	return ret;
}

u64
cmf_read(struct ccw_device *cdev, int index)
{
	return cmbops->read(cdev, index);
}

int
cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
{
	return cmbops->readall(cdev, data);
}
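/*
 * Illustrative caller (a hedged sketch, not part of this file): a device
 * driver that owns a struct ccw_device *cdev could use the exported
 * interface roughly as follows; error handling is minimal and the message
 * text is only an example.
 *
 *	struct cmbdata data;
 *	int ret;
 *
 *	ret = enable_cmf(cdev);
 *	if (ret && ret != -EBUSY)
 *		return ret;
 *	... let some I/O run ...
 *	ret = cmf_readall(cdev, &data);
 *	if (!ret)
 *		printk(KERN_INFO "connect time %llu ns over %llu samples\n",
 *		       (unsigned long long) data.device_connect_time,
 *		       (unsigned long long) data.sample_count);
 *	disable_cmf(cdev);
 */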
static int __init
init_cmf(void)
{
	char *format_string;
	char *detect_string = "parameter";

	/* We cannot really autoprobe this. If the user did not give a
	 * parameter, see if we are running on z990 or up, otherwise fall back
	 * to basic mode. */
	if (format == CMF_AUTODETECT) {
		if (!css_characteristics_avail ||
		    !css_general_characteristics.ext_mb) {
			format = CMF_BASIC;
		} else {
			format = CMF_EXTENDED;
		}
		detect_string = "autodetected";
	} else {
		detect_string = "parameter";
	}

	switch (format) {
	case CMF_BASIC:
		format_string = "basic";
		cmbops = &cmbops_basic;
		if (cmb_area.num_channels > 4096 || cmb_area.num_channels < 1) {
			printk(KERN_ERR "Basic channel measurement facility"
					" can only use 1 to 4096 devices\n"
			       KERN_ERR "when the cmf driver is built"
					" as a loadable module\n");
			return 1;
		}
		break;
	case CMF_EXTENDED:
		format_string = "extended";
		cmbops = &cmbops_extended;
		break;
	default:
		printk(KERN_ERR "Invalid format %d for channel "
			"measurement facility\n", format);
		return 1;
	}

	printk(KERN_INFO "Channel measurement facility using %s format (%s)\n",
		format_string, detect_string);
	return 0;
}

module_init(init_cmf);

MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("channel measurement facility base driver\n"
		   "Copyright 2003 IBM Corporation\n");

EXPORT_SYMBOL_GPL(enable_cmf);
EXPORT_SYMBOL_GPL(disable_cmf);
EXPORT_SYMBOL_GPL(cmf_read);
EXPORT_SYMBOL_GPL(cmf_readall);