  1. /*
  2. * PAV alias management for the DASD ECKD discipline
  3. *
  4. * Copyright IBM Corporation, 2007
  5. * Author(s): Stefan Weinhuber <wein@de.ibm.com>
  6. */
  7. #define KMSG_COMPONENT "dasd-eckd"
  8. #include <linux/list.h>
  9. #include <linux/slab.h>
  10. #include <asm/ebcdic.h>
  11. #include "dasd_int.h"
  12. #include "dasd_eckd.h"
  13. #ifdef PRINTK_HEADER
  14. #undef PRINTK_HEADER
  15. #endif /* PRINTK_HEADER */
  16. #define PRINTK_HEADER "dasd(eckd):"
  17. /*
  18. * General concept of alias management:
  19. * - PAV and DASD alias management is specific to the eckd discipline.
  20. * - A device is connected to an lcu as long as the device exists.
* dasd_alias_make_device_known_to_lcu will be called when the
  22. * device is checked by the eckd discipline and
  23. * dasd_alias_disconnect_device_from_lcu will be called
  24. * before the device is deleted.
  25. * - The dasd_alias_add_device / dasd_alias_remove_device
  26. * functions mark the point when a device is 'ready for service'.
  27. * - A summary unit check is a rare occasion, but it is mandatory to
  28. * support it. It requires some complex recovery actions before the
  29. * devices can be used again (see dasd_alias_handle_summary_unit_check).
  30. * - dasd_alias_get_start_dev will find an alias device that can be used
  31. * instead of the base device and does some (very simple) load balancing.
  32. * This is the function that gets called for each I/O, so when improving
  33. * something, this function should get faster or better, the rest has just
  34. * to be correct.
  35. */
/* worker callbacks used by the alias management */
static void summary_unit_check_handling_work(struct work_struct *);
static void lcu_update_work(struct work_struct *);
static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);

/*
 * Root of all alias management data: the list of known storage
 * servers, protected by its own spinlock.
 */
static struct alias_root aliastree = {
	.serverlist = LIST_HEAD_INIT(aliastree.serverlist),
	.lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
};
  43. static struct alias_server *_find_server(struct dasd_uid *uid)
  44. {
  45. struct alias_server *pos;
  46. list_for_each_entry(pos, &aliastree.serverlist, server) {
  47. if (!strncmp(pos->uid.vendor, uid->vendor,
  48. sizeof(uid->vendor))
  49. && !strncmp(pos->uid.serial, uid->serial,
  50. sizeof(uid->serial)))
  51. return pos;
  52. };
  53. return NULL;
  54. }
  55. static struct alias_lcu *_find_lcu(struct alias_server *server,
  56. struct dasd_uid *uid)
  57. {
  58. struct alias_lcu *pos;
  59. list_for_each_entry(pos, &server->lculist, lcu) {
  60. if (pos->uid.ssid == uid->ssid)
  61. return pos;
  62. };
  63. return NULL;
  64. }
  65. static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
  66. struct dasd_uid *uid)
  67. {
  68. struct alias_pav_group *pos;
  69. __u8 search_unit_addr;
  70. /* for hyper pav there is only one group */
  71. if (lcu->pav == HYPER_PAV) {
  72. if (list_empty(&lcu->grouplist))
  73. return NULL;
  74. else
  75. return list_first_entry(&lcu->grouplist,
  76. struct alias_pav_group, group);
  77. }
  78. /* for base pav we have to find the group that matches the base */
  79. if (uid->type == UA_BASE_DEVICE)
  80. search_unit_addr = uid->real_unit_addr;
  81. else
  82. search_unit_addr = uid->base_unit_addr;
  83. list_for_each_entry(pos, &lcu->grouplist, group) {
  84. if (pos->uid.base_unit_addr == search_unit_addr &&
  85. !strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
  86. return pos;
  87. };
  88. return NULL;
  89. }
  90. static struct alias_server *_allocate_server(struct dasd_uid *uid)
  91. {
  92. struct alias_server *server;
  93. server = kzalloc(sizeof(*server), GFP_KERNEL);
  94. if (!server)
  95. return ERR_PTR(-ENOMEM);
  96. memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
  97. memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
  98. INIT_LIST_HEAD(&server->server);
  99. INIT_LIST_HEAD(&server->lculist);
  100. return server;
  101. }
/* release an alias_server allocated by _allocate_server */
static void _free_server(struct alias_server *server)
{
	kfree(server);
}
/*
 * Allocate and initialize an alias_lcu, including the buffers needed
 * later on: the unit address configuration data (uac) and a
 * preallocated CCW request (rsu_cqr) used to reset a summary unit
 * check. These are allocated up front with GFP_DMA so that nothing
 * needs to be allocated in the summary unit check handler itself.
 * Returns ERR_PTR(-ENOMEM) if any allocation fails.
 */
static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
{
	struct alias_lcu *lcu;

	lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
	if (!lcu)
		return ERR_PTR(-ENOMEM);
	lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
	if (!lcu->uac)
		goto out_err1;
	lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr)
		goto out_err2;
	/* the reset request uses a single CCW and 16 bytes of data */
	lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
				       GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->cpaddr)
		goto out_err3;
	lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->data)
		goto out_err4;

	memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
	memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
	lcu->uid.ssid = uid->ssid;
	lcu->pav = NO_PAV;
	/* a new lcu must have its unit address configuration read first */
	lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
	INIT_LIST_HEAD(&lcu->lcu);
	INIT_LIST_HEAD(&lcu->inactive_devices);
	INIT_LIST_HEAD(&lcu->active_devices);
	INIT_LIST_HEAD(&lcu->grouplist);
	INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
	INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
	spin_lock_init(&lcu->lock);
	init_completion(&lcu->lcu_setup);
	return lcu;

	/* error exits: free in reverse order of allocation */
out_err4:
	kfree(lcu->rsu_cqr->cpaddr);
out_err3:
	kfree(lcu->rsu_cqr);
out_err2:
	kfree(lcu->uac);
out_err1:
	kfree(lcu);
	return ERR_PTR(-ENOMEM);
}
/* release an alias_lcu and all buffers allocated by _allocate_lcu */
static void _free_lcu(struct alias_lcu *lcu)
{
	kfree(lcu->rsu_cqr->data);
	kfree(lcu->rsu_cqr->cpaddr);
	kfree(lcu->rsu_cqr);
	kfree(lcu->uac);
	kfree(lcu);
}
  157. /*
  158. * This is the function that will allocate all the server and lcu data,
  159. * so this function must be called first for a new device.
  160. * If the return value is 1, the lcu was already known before, if it
  161. * is 0, this is a new lcu.
  162. * Negative return code indicates that something went wrong (e.g. -ENOMEM)
  163. */
  164. int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
  165. {
  166. struct dasd_eckd_private *private;
  167. unsigned long flags;
  168. struct alias_server *server, *newserver;
  169. struct alias_lcu *lcu, *newlcu;
  170. int is_lcu_known;
  171. struct dasd_uid uid;
  172. private = (struct dasd_eckd_private *) device->private;
  173. device->discipline->get_uid(device, &uid);
  174. spin_lock_irqsave(&aliastree.lock, flags);
  175. is_lcu_known = 1;
  176. server = _find_server(&uid);
  177. if (!server) {
  178. spin_unlock_irqrestore(&aliastree.lock, flags);
  179. newserver = _allocate_server(&uid);
  180. if (IS_ERR(newserver))
  181. return PTR_ERR(newserver);
  182. spin_lock_irqsave(&aliastree.lock, flags);
  183. server = _find_server(&uid);
  184. if (!server) {
  185. list_add(&newserver->server, &aliastree.serverlist);
  186. server = newserver;
  187. is_lcu_known = 0;
  188. } else {
  189. /* someone was faster */
  190. _free_server(newserver);
  191. }
  192. }
  193. lcu = _find_lcu(server, &uid);
  194. if (!lcu) {
  195. spin_unlock_irqrestore(&aliastree.lock, flags);
  196. newlcu = _allocate_lcu(&uid);
  197. if (IS_ERR(newlcu))
  198. return PTR_ERR(newlcu);
  199. spin_lock_irqsave(&aliastree.lock, flags);
  200. lcu = _find_lcu(server, &uid);
  201. if (!lcu) {
  202. list_add(&newlcu->lcu, &server->lculist);
  203. lcu = newlcu;
  204. is_lcu_known = 0;
  205. } else {
  206. /* someone was faster */
  207. _free_lcu(newlcu);
  208. }
  209. is_lcu_known = 0;
  210. }
  211. spin_lock(&lcu->lock);
  212. list_add(&device->alias_list, &lcu->inactive_devices);
  213. private->lcu = lcu;
  214. spin_unlock(&lcu->lock);
  215. spin_unlock_irqrestore(&aliastree.lock, flags);
  216. return is_lcu_known;
  217. }
  218. /*
  219. * The first device to be registered on an LCU will have to do
  220. * some additional setup steps to configure that LCU on the
  221. * storage server. All further devices should wait with their
  222. * initialization until the first device is done.
  223. * To synchronize this work, the first device will call
  224. * dasd_alias_lcu_setup_complete when it is done, and all
  225. * other devices will wait for it with dasd_alias_wait_for_lcu_setup.
  226. */
  227. void dasd_alias_lcu_setup_complete(struct dasd_device *device)
  228. {
  229. struct dasd_eckd_private *private;
  230. unsigned long flags;
  231. struct alias_server *server;
  232. struct alias_lcu *lcu;
  233. struct dasd_uid uid;
  234. private = (struct dasd_eckd_private *) device->private;
  235. device->discipline->get_uid(device, &uid);
  236. lcu = NULL;
  237. spin_lock_irqsave(&aliastree.lock, flags);
  238. server = _find_server(&uid);
  239. if (server)
  240. lcu = _find_lcu(server, &uid);
  241. spin_unlock_irqrestore(&aliastree.lock, flags);
  242. if (!lcu) {
  243. DBF_EVENT_DEVID(DBF_ERR, device->cdev,
  244. "could not find lcu for %04x %02x",
  245. uid.ssid, uid.real_unit_addr);
  246. WARN_ON(1);
  247. return;
  248. }
  249. complete_all(&lcu->lcu_setup);
  250. }
  251. void dasd_alias_wait_for_lcu_setup(struct dasd_device *device)
  252. {
  253. struct dasd_eckd_private *private;
  254. unsigned long flags;
  255. struct alias_server *server;
  256. struct alias_lcu *lcu;
  257. struct dasd_uid uid;
  258. private = (struct dasd_eckd_private *) device->private;
  259. device->discipline->get_uid(device, &uid);
  260. lcu = NULL;
  261. spin_lock_irqsave(&aliastree.lock, flags);
  262. server = _find_server(&uid);
  263. if (server)
  264. lcu = _find_lcu(server, &uid);
  265. spin_unlock_irqrestore(&aliastree.lock, flags);
  266. if (!lcu) {
  267. DBF_EVENT_DEVID(DBF_ERR, device->cdev,
  268. "could not find lcu for %04x %02x",
  269. uid.ssid, uid.real_unit_addr);
  270. WARN_ON(1);
  271. return;
  272. }
  273. wait_for_completion(&lcu->lcu_setup);
  274. }
  275. /*
  276. * This function removes a device from the scope of alias management.
  277. * The complicated part is to make sure that it is not in use by
  278. * any of the workers. If necessary cancel the work.
  279. */
void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	unsigned long flags;
	struct alias_lcu *lcu;
	struct alias_server *server;
	int was_pending;
	struct dasd_uid uid;

	private = (struct dasd_eckd_private *) device->private;
	lcu = private->lcu;
	/* nothing to do if already disconnected */
	if (!lcu)
		return;
	device->discipline->get_uid(device, &uid);
	spin_lock_irqsave(&lcu->lock, flags);
	list_del_init(&device->alias_list);
	/* make sure that the workers don't use this device */
	if (device == lcu->suc_data.device) {
		/*
		 * cancel_work_sync may sleep and the worker takes
		 * lcu->lock itself, so drop the lock while cancelling
		 * and recheck afterwards.
		 */
		spin_unlock_irqrestore(&lcu->lock, flags);
		cancel_work_sync(&lcu->suc_data.worker);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->suc_data.device)
			lcu->suc_data.device = NULL;
	}
	was_pending = 0;
	if (device == lcu->ruac_data.device) {
		/* remember to reschedule the update on another device */
		spin_unlock_irqrestore(&lcu->lock, flags);
		was_pending = 1;
		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->ruac_data.device)
			lcu->ruac_data.device = NULL;
	}
	private->lcu = NULL;
	spin_unlock_irqrestore(&lcu->lock, flags);

	/* lock order: aliastree.lock is taken before lcu->lock */
	spin_lock_irqsave(&aliastree.lock, flags);
	spin_lock(&lcu->lock);
	if (list_empty(&lcu->grouplist) &&
	    list_empty(&lcu->active_devices) &&
	    list_empty(&lcu->inactive_devices)) {
		/* last device gone: dispose of the lcu itself */
		list_del(&lcu->lcu);
		spin_unlock(&lcu->lock);
		_free_lcu(lcu);
		lcu = NULL;
	} else {
		if (was_pending)
			_schedule_lcu_update(lcu, NULL);
		spin_unlock(&lcu->lock);
	}
	server = _find_server(&uid);
	if (server && list_empty(&server->lculist)) {
		/* last lcu gone: dispose of the server node as well */
		list_del(&server->server);
		_free_server(server);
	}
	spin_unlock_irqrestore(&aliastree.lock, flags);
}
  336. /*
  337. * This function assumes that the unit address configuration stored
  338. * in the lcu is up to date and will update the device uid before
  339. * adding it to a pav group.
  340. */
static int _add_device_to_lcu(struct alias_lcu *lcu,
			      struct dasd_device *device,
			      struct dasd_device *pos)
{
	struct dasd_eckd_private *private;
	struct alias_pav_group *group;
	struct dasd_uid uid;
	unsigned long flags;

	private = (struct dasd_eckd_private *) device->private;
	/* only lock if not already locked */
	if (device != pos)
		spin_lock_irqsave_nested(get_ccwdev_lock(device->cdev), flags,
					 CDEV_NESTED_SECOND);
	/* refresh the uid type and base address from the uac data */
	private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
	private->uid.base_unit_addr =
		lcu->uac->unit[private->uid.real_unit_addr].base_ua;
	uid = private->uid;
	if (device != pos)
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	/* if we have no PAV anyway, we don't need to bother with PAV groups */
	if (lcu->pav == NO_PAV) {
		list_move(&device->alias_list, &lcu->active_devices);
		return 0;
	}
	group = _find_group(lcu, &uid);
	if (!group) {
		/* called under lcu->lock, so allocation must be atomic */
		group = kzalloc(sizeof(*group), GFP_ATOMIC);
		if (!group)
			return -ENOMEM;
		memcpy(group->uid.vendor, uid.vendor, sizeof(uid.vendor));
		memcpy(group->uid.serial, uid.serial, sizeof(uid.serial));
		group->uid.ssid = uid.ssid;
		/* groups are keyed by the unit address of the base device */
		if (uid.type == UA_BASE_DEVICE)
			group->uid.base_unit_addr = uid.real_unit_addr;
		else
			group->uid.base_unit_addr = uid.base_unit_addr;
		memcpy(group->uid.vduit, uid.vduit, sizeof(uid.vduit));
		INIT_LIST_HEAD(&group->group);
		INIT_LIST_HEAD(&group->baselist);
		INIT_LIST_HEAD(&group->aliaslist);
		list_add(&group->group, &lcu->grouplist);
	}
	/* sort the device into the base or alias list of its group */
	if (uid.type == UA_BASE_DEVICE)
		list_move(&device->alias_list, &group->baselist);
	else
		list_move(&device->alias_list, &group->aliaslist);
	private->pavgroup = group;
	return 0;
};
  390. static void _remove_device_from_lcu(struct alias_lcu *lcu,
  391. struct dasd_device *device)
  392. {
  393. struct dasd_eckd_private *private;
  394. struct alias_pav_group *group;
  395. private = (struct dasd_eckd_private *) device->private;
  396. list_move(&device->alias_list, &lcu->inactive_devices);
  397. group = private->pavgroup;
  398. if (!group)
  399. return;
  400. private->pavgroup = NULL;
  401. if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
  402. list_del(&group->group);
  403. kfree(group);
  404. return;
  405. }
  406. if (group->next == device)
  407. group->next = NULL;
  408. };
/*
 * Read the unit address configuration of the lcu into lcu->uac by
 * issuing a Perform Subsystem Function / Read Subsystem Data request
 * (suborder 0x0e) on the given device. Returns 0 on success or the
 * error code of the failed I/O.
 */
static int read_unit_address_configuration(struct dasd_device *device,
					   struct alias_lcu *lcu)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;
	unsigned long flags;

	cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data)),
				   device);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 10;
	cqr->expires = 20 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x0e;	/* Read unit address configuration */
	/* all other bytes of prssdp must be zero */

	/* first CCW: the PSF command carrying the prssd data */
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* second CCW: Read Subsystem Data into lcu->uac */
	memset(lcu->uac, 0, sizeof(*(lcu->uac)));
	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*(lcu->uac));
	ccw->cda = (__u32)(addr_t) lcu->uac;

	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	/* need to unset flag here to detect race with summary unit check */
	spin_lock_irqsave(&lcu->lock, flags);
	lcu->flags &= ~NEED_UAC_UPDATE;
	spin_unlock_irqrestore(&lcu->lock, flags);
	do {
		rc = dasd_sleep_on(cqr);
	} while (rc && (cqr->retries > 0));
	if (rc) {
		/* the data could not be read; request another update */
		spin_lock_irqsave(&lcu->lock, flags);
		lcu->flags |= NEED_UAC_UPDATE;
		spin_unlock_irqrestore(&lcu->lock, flags);
	}
	dasd_kfree_request(cqr, cqr->memdev);
	return rc;
}
/*
 * Re-read the unit address configuration via refdev and rebuild the
 * PAV groups of the lcu from scratch: all devices are first moved
 * back to the active_devices list and the old groups are freed; after
 * the uac data has been read, the PAV mode is redetermined and every
 * device is sorted into its group again.
 */
static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
{
	unsigned long flags;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *device, *tempdev;
	int i, rc;
	struct dasd_eckd_private *private;

	spin_lock_irqsave(&lcu->lock, flags);
	/* tear down all pav groups; their devices go back to active list */
	list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
		list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = (struct dasd_eckd_private *) device->private;
			private->pavgroup = NULL;
		}
		list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = (struct dasd_eckd_private *) device->private;
			private->pavgroup = NULL;
		}
		list_del(&pavgroup->group);
		kfree(pavgroup);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);

	/* the actual I/O is done without holding any locks */
	rc = read_unit_address_configuration(refdev, lcu);
	if (rc)
		return rc;

	/* need to take cdev lock before lcu lock */
	spin_lock_irqsave_nested(get_ccwdev_lock(refdev->cdev), flags,
				 CDEV_NESTED_FIRST);
	spin_lock(&lcu->lock);
	/* derive the PAV mode from the first PAV unit address type found */
	lcu->pav = NO_PAV;
	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
		switch (lcu->uac->unit[i].ua_type) {
		case UA_BASE_PAV_ALIAS:
			lcu->pav = BASE_PAV;
			break;
		case UA_HYPER_PAV_ALIAS:
			lcu->pav = HYPER_PAV;
			break;
		}
		if (lcu->pav != NO_PAV)
			break;
	}
	/* re-sort every device into its pav group */
	list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
				 alias_list) {
		_add_device_to_lcu(lcu, device, refdev);
	}
	spin_unlock(&lcu->lock);
	spin_unlock_irqrestore(get_ccwdev_lock(refdev->cdev), flags);
	return 0;
}
/*
 * Delayed-work callback that performs a scheduled lcu update.
 * On failure, or if another update was requested in the meantime, it
 * reschedules itself with a 30 second delay; otherwise it clears the
 * pending state so that dasd_alias_get_start_dev may hand out aliases
 * again.
 */
static void lcu_update_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct read_uac_work_data *ruac_data;
	struct dasd_device *device;
	unsigned long flags;
	int rc;

	ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
	lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
	device = ruac_data->device;
	rc = _lcu_update(device, lcu);
	/*
	 * Need to check flags again, as there could have been another
	 * prepare_update, or a new device, while we were still
	 * processing the data
	 */
	spin_lock_irqsave(&lcu->lock, flags);
	if (rc || (lcu->flags & NEED_UAC_UPDATE)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
			    " alias data in lcu (rc = %d), retry later", rc);
		schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
	} else {
		lcu->ruac_data.device = NULL;
		lcu->flags &= ~UPDATE_PENDING;
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}
  541. static int _schedule_lcu_update(struct alias_lcu *lcu,
  542. struct dasd_device *device)
  543. {
  544. struct dasd_device *usedev = NULL;
  545. struct alias_pav_group *group;
  546. lcu->flags |= NEED_UAC_UPDATE;
  547. if (lcu->ruac_data.device) {
  548. /* already scheduled or running */
  549. return 0;
  550. }
  551. if (device && !list_empty(&device->alias_list))
  552. usedev = device;
  553. if (!usedev && !list_empty(&lcu->grouplist)) {
  554. group = list_first_entry(&lcu->grouplist,
  555. struct alias_pav_group, group);
  556. if (!list_empty(&group->baselist))
  557. usedev = list_first_entry(&group->baselist,
  558. struct dasd_device,
  559. alias_list);
  560. else if (!list_empty(&group->aliaslist))
  561. usedev = list_first_entry(&group->aliaslist,
  562. struct dasd_device,
  563. alias_list);
  564. }
  565. if (!usedev && !list_empty(&lcu->active_devices)) {
  566. usedev = list_first_entry(&lcu->active_devices,
  567. struct dasd_device, alias_list);
  568. }
  569. /*
  570. * if we haven't found a proper device yet, give up for now, the next
  571. * device that will be set active will trigger an lcu update
  572. */
  573. if (!usedev)
  574. return -EINVAL;
  575. lcu->ruac_data.device = usedev;
  576. schedule_delayed_work(&lcu->ruac_data.dwork, 0);
  577. return 0;
  578. }
/*
 * Make a device 'ready for service': sort it into its pav group, or,
 * if the lcu still needs a configuration update, park it on the
 * active_devices list and (re)schedule that update.
 */
int dasd_alias_add_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct alias_lcu *lcu;
	unsigned long flags;
	int rc;

	private = (struct dasd_eckd_private *) device->private;
	lcu = private->lcu;
	rc = 0;

	/* need to take cdev lock before lcu lock */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	spin_lock(&lcu->lock);
	if (!(lcu->flags & UPDATE_PENDING)) {
		rc = _add_device_to_lcu(lcu, device, device);
		/* e.g. -ENOMEM: fall back to a full lcu update */
		if (rc)
			lcu->flags |= UPDATE_PENDING;
	}
	if (lcu->flags & UPDATE_PENDING) {
		list_move(&device->alias_list, &lcu->active_devices);
		_schedule_lcu_update(lcu, device);
	}
	spin_unlock(&lcu->lock);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	return rc;
}
  604. int dasd_alias_update_add_device(struct dasd_device *device)
  605. {
  606. struct dasd_eckd_private *private;
  607. private = (struct dasd_eckd_private *) device->private;
  608. private->lcu->flags |= UPDATE_PENDING;
  609. return dasd_alias_add_device(device);
  610. }
  611. int dasd_alias_remove_device(struct dasd_device *device)
  612. {
  613. struct dasd_eckd_private *private;
  614. struct alias_lcu *lcu;
  615. unsigned long flags;
  616. private = (struct dasd_eckd_private *) device->private;
  617. lcu = private->lcu;
  618. /* nothing to do if already removed */
  619. if (!lcu)
  620. return 0;
  621. spin_lock_irqsave(&lcu->lock, flags);
  622. _remove_device_from_lcu(lcu, device);
  623. spin_unlock_irqrestore(&lcu->lock, flags);
  624. return 0;
  625. }
/*
 * Select an alias device that can be used instead of the given base
 * device for the next I/O, doing simple round-robin load balancing
 * over the aliases of the base device's PAV group. This is the
 * per-I/O hot path. Returns NULL when no suitable alias exists
 * (no PAV, update pending, no aliases, alias busier than the base,
 * or alias stopped).
 */
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{
	struct dasd_device *alias_device;
	struct alias_pav_group *group;
	struct alias_lcu *lcu;
	struct dasd_eckd_private *private, *alias_priv;
	unsigned long flags;

	private = (struct dasd_eckd_private *) base_device->private;
	group = private->pavgroup;
	lcu = private->lcu;
	if (!group || !lcu)
		return NULL;
	if (lcu->pav == NO_PAV ||
	    lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
		return NULL;

	spin_lock_irqsave(&lcu->lock, flags);
	alias_device = group->next;
	if (!alias_device) {
		/* round-robin pointer not set yet: start at the front */
		if (list_empty(&group->aliaslist)) {
			spin_unlock_irqrestore(&lcu->lock, flags);
			return NULL;
		} else {
			alias_device = list_first_entry(&group->aliaslist,
							struct dasd_device,
							alias_list);
		}
	}
	/* advance group->next to the next alias, wrapping at the end */
	if (list_is_last(&alias_device->alias_list, &group->aliaslist))
		group->next = list_first_entry(&group->aliaslist,
					       struct dasd_device, alias_list);
	else
		group->next = list_first_entry(&alias_device->alias_list,
					       struct dasd_device, alias_list);
	spin_unlock_irqrestore(&lcu->lock, flags);
	alias_priv = (struct dasd_eckd_private *) alias_device->private;
	/* only use the alias if it is less busy than the base device */
	if ((alias_priv->count < private->count) && !alias_device->stopped)
		return alias_device;
	else
		return NULL;
}
  666. /*
  667. * Summary unit check handling depends on the way alias devices
  668. * are handled so it is done here rather then in dasd_eckd.c
  669. */
/*
 * Build and synchronously run the preallocated Reset Summary Unit
 * Check request (lcu->rsu_cqr) on the given device. The reason code
 * is sent to the storage server in the first data byte.
 */
static int reset_summary_unit_check(struct alias_lcu *lcu,
				    struct dasd_device *device,
				    char reason)
{
	struct dasd_ccw_req *cqr;
	int rc = 0;
	struct ccw1 *ccw;

	cqr = lcu->rsu_cqr;
	strncpy((char *) &cqr->magic, "ECKD", 4);
	ASCEBC((char *) &cqr->magic, 4);
	/* single RSCK CCW with 16 bytes of data */
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RSCK;
	ccw->flags = 0 ;
	ccw->count = 16;
	ccw->cda = (__u32)(addr_t) cqr->data;
	((char *)cqr->data)[0] = reason;

	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 255;	/* set retry counter to enable basic ERP */
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 5 * HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}
/*
 * Kick the device and block bottom halves of all base devices on the
 * lcu, so that queued requests get going again.
 */
static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device;
	struct dasd_eckd_private *private;
	unsigned long flags;

	/* active and inactive list can contain alias as well as base devices */
	list_for_each_entry(device, &lcu->active_devices, alias_list) {
		private = (struct dasd_eckd_private *) device->private;
		/* the uid is modified under the cdev lock, so read it there */
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		if (private->uid.type != UA_BASE_DEVICE) {
			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
					       flags);
			continue;
		}
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
		dasd_schedule_block_bh(device->block);
		dasd_schedule_device_bh(device);
	}
	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
		private = (struct dasd_eckd_private *) device->private;
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		if (private->uid.type != UA_BASE_DEVICE) {
			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
					       flags);
			continue;
		}
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
		dasd_schedule_block_bh(device->block);
		dasd_schedule_device_bh(device);
	}
	/* every device on a group's baselist is a base device */
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
			dasd_schedule_block_bh(device->block);
			dasd_schedule_device_bh(device);
		}
	}
}
/*
 * Flush the request queues of all alias devices on the lcu and move
 * them to the lcu's active_devices list.
 */
static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device, *temp;
	struct dasd_eckd_private *private;
	int rc;
	unsigned long flags;
	LIST_HEAD(active);

	/*
	 * Problem here is that dasd_flush_device_queue may wait
	 * for termination of a request to complete. We can't keep
	 * the lcu lock during that time, so we must assume that
	 * the lists may have changed.
	 * Idea: first gather all active alias devices in a separate list,
	 * then flush the first element of this list unlocked, and afterwards
	 * check if it is still on the list before moving it to the
	 * active_devices list.
	 */
	spin_lock_irqsave(&lcu->lock, flags);
	list_for_each_entry_safe(device, temp, &lcu->active_devices,
				 alias_list) {
		private = (struct dasd_eckd_private *) device->private;
		if (private->uid.type == UA_BASE_DEVICE)
			continue;
		list_move(&device->alias_list, &active);
	}
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_splice_init(&pavgroup->aliaslist, &active);
	}
	while (!list_empty(&active)) {
		device = list_first_entry(&active, struct dasd_device,
					  alias_list);
		spin_unlock_irqrestore(&lcu->lock, flags);
		/* NOTE(review): rc is assigned but never checked here */
		rc = dasd_flush_device_queue(device);
		spin_lock_irqsave(&lcu->lock, flags);
		/*
		 * only move device around if it wasn't moved away while we
		 * were waiting for the flush
		 */
		if (device == list_first_entry(&active,
					       struct dasd_device, alias_list))
			list_move(&device->alias_list, &lcu->active_devices);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}
  780. static void __stop_device_on_lcu(struct dasd_device *device,
  781. struct dasd_device *pos)
  782. {
  783. /* If pos == device then device is already locked! */
  784. if (pos == device) {
  785. dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
  786. return;
  787. }
  788. spin_lock(get_ccwdev_lock(pos->cdev));
  789. dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
  790. spin_unlock(get_ccwdev_lock(pos->cdev));
  791. }
  792. /*
  793. * This function is called in interrupt context, so the
  794. * cdev lock for device is already locked!
  795. */
  796. static void _stop_all_devices_on_lcu(struct alias_lcu *lcu,
  797. struct dasd_device *device)
  798. {
  799. struct alias_pav_group *pavgroup;
  800. struct dasd_device *pos;
  801. list_for_each_entry(pos, &lcu->active_devices, alias_list)
  802. __stop_device_on_lcu(device, pos);
  803. list_for_each_entry(pos, &lcu->inactive_devices, alias_list)
  804. __stop_device_on_lcu(device, pos);
  805. list_for_each_entry(pavgroup, &lcu->grouplist, group) {
  806. list_for_each_entry(pos, &pavgroup->baselist, alias_list)
  807. __stop_device_on_lcu(device, pos);
  808. list_for_each_entry(pos, &pavgroup->aliaslist, alias_list)
  809. __stop_device_on_lcu(device, pos);
  810. }
  811. }
  812. static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
  813. {
  814. struct alias_pav_group *pavgroup;
  815. struct dasd_device *device;
  816. unsigned long flags;
  817. list_for_each_entry(device, &lcu->active_devices, alias_list) {
  818. spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
  819. dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
  820. spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
  821. }
  822. list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
  823. spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
  824. dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
  825. spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
  826. }
  827. list_for_each_entry(pavgroup, &lcu->grouplist, group) {
  828. list_for_each_entry(device, &pavgroup->baselist, alias_list) {
  829. spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
  830. dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
  831. spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
  832. flags);
  833. }
  834. list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
  835. spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
  836. dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
  837. spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
  838. flags);
  839. }
  840. }
  841. }
/*
 * Worker for deferred summary unit check handling, scheduled by
 * dasd_alias_handle_summary_unit_check().  Flushes all alias devices,
 * resets the summary unit check on the triggering device, re-enables
 * all devices on the LCU and schedules a re-read of the alias
 * configuration.  Runs in process context, so it may sleep (the flush
 * can wait for request termination).
 */
static void summary_unit_check_handling_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct summary_unit_check_work_data *suc_data;
	unsigned long flags;
	struct dasd_device *device;

	/* Recover lcu and triggering device from the embedded work item. */
	suc_data = container_of(work, struct summary_unit_check_work_data,
				worker);
	lcu = container_of(suc_data, struct alias_lcu, suc_data);
	device = suc_data->device;

	/* 1. flush alias devices */
	flush_all_alias_devices_on_lcu(lcu);

	/* 2. reset summary unit check */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	dasd_device_remove_stop_bits(device,
				     (DASD_STOPPED_SU | DASD_STOPPED_PENDING));
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	/* reset_summary_unit_check() runs unlocked; presumably it issues
	 * I/O to the device — TODO confirm against its definition. */
	reset_summary_unit_check(lcu, device, suc_data->reason);

	spin_lock_irqsave(&lcu->lock, flags);
	_unstop_all_devices_on_lcu(lcu);
	_restart_all_base_devices_on_lcu(lcu);
	/* 3. read new alias configuration */
	_schedule_lcu_update(lcu, device);
	/* Clear the marker so a new worker may be scheduled for this lcu. */
	lcu->suc_data.device = NULL;
	spin_unlock_irqrestore(&lcu->lock, flags);
}
  868. /*
  869. * note: this will be called from int handler context (cdev locked)
  870. */
  871. void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
  872. struct irb *irb)
  873. {
  874. struct alias_lcu *lcu;
  875. char reason;
  876. struct dasd_eckd_private *private;
  877. char *sense;
  878. private = (struct dasd_eckd_private *) device->private;
  879. sense = dasd_get_sense(irb);
  880. if (sense) {
  881. reason = sense[8];
  882. DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
  883. "eckd handle summary unit check: reason", reason);
  884. } else {
  885. DBF_DEV_EVENT(DBF_WARNING, device, "%s",
  886. "eckd handle summary unit check:"
  887. " no reason code available");
  888. return;
  889. }
  890. lcu = private->lcu;
  891. if (!lcu) {
  892. DBF_DEV_EVENT(DBF_WARNING, device, "%s",
  893. "device not ready to handle summary"
  894. " unit check (no lcu structure)");
  895. return;
  896. }
  897. spin_lock(&lcu->lock);
  898. _stop_all_devices_on_lcu(lcu, device);
  899. /* prepare for lcu_update */
  900. private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
  901. /* If this device is about to be removed just return and wait for
  902. * the next interrupt on a different device
  903. */
  904. if (list_empty(&device->alias_list)) {
  905. DBF_DEV_EVENT(DBF_WARNING, device, "%s",
  906. "device is in offline processing,"
  907. " don't do summary unit check handling");
  908. spin_unlock(&lcu->lock);
  909. return;
  910. }
  911. if (lcu->suc_data.device) {
  912. /* already scheduled or running */
  913. DBF_DEV_EVENT(DBF_WARNING, device, "%s",
  914. "previous instance of summary unit check worker"
  915. " still pending");
  916. spin_unlock(&lcu->lock);
  917. return ;
  918. }
  919. lcu->suc_data.reason = reason;
  920. lcu->suc_data.device = device;
  921. spin_unlock(&lcu->lock);
  922. schedule_work(&lcu->suc_data.worker);
  923. };