/*
 * dasd_alias.c - PAV alias management for the DASD ECKD discipline
 *
 * Copyright IBM Corporation, 2007
 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
 */
#include <linux/list.h>
#include <asm/ebcdic.h>
#include "dasd_int.h"
#include "dasd_eckd.h"

#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif /* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eckd):"

/*
 * General concept of alias management:
 * - PAV and DASD alias management is specific to the eckd discipline.
 * - A device is connected to an lcu as long as the device exists.
 *   dasd_alias_make_device_known_to_lcu will be called when the
 *   device is checked by the eckd discipline and
 *   dasd_alias_disconnect_device_from_lcu will be called
 *   before the device is deleted.
 * - The dasd_alias_add_device / dasd_alias_remove_device
 *   functions mark the point when a device is 'ready for service'.
 * - A summary unit check is a rare occasion, but it is mandatory to
 *   support it. It requires some complex recovery actions before the
 *   devices can be used again (see dasd_alias_handle_summary_unit_check).
 * - dasd_alias_get_start_dev will find an alias device that can be used
 *   instead of the base device and does some (very simple) load balancing.
 *   This is the function that gets called for each I/O, so when improving
 *   something, this function should get faster or better; the rest just
 *   has to be correct.
 */
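
/*
 * Rough lifecycle of a single device, as a sketch of the description
 * above. The actual call sites are in the eckd discipline code and are
 * not shown here:
 *
 *	dasd_alias_make_device_known_to_lcu(device);   attach device to lcu
 *	dasd_alias_add_device(device);                 device ready for service
 *	...
 *	startdev = dasd_alias_get_start_dev(base);     per I/O, may pick alias
 *	...
 *	dasd_alias_remove_device(device);              device leaves service
 *	dasd_alias_disconnect_device_from_lcu(device); detach device from lcu
 */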

static void summary_unit_check_handling_work(struct work_struct *);
static void lcu_update_work(struct work_struct *);
static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);

static struct alias_root aliastree = {
	.serverlist = LIST_HEAD_INIT(aliastree.serverlist),
	.lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
};
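
/*
 * Lookup helper: find the alias_server entry whose vendor and serial
 * match the given uid. Callers hold aliastree.lock.
 */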
static struct alias_server *_find_server(struct dasd_uid *uid)
{
	struct alias_server *pos;

	list_for_each_entry(pos, &aliastree.serverlist, server) {
		if (!strncmp(pos->uid.vendor, uid->vendor,
			     sizeof(uid->vendor))
		    && !strncmp(pos->uid.serial, uid->serial,
				sizeof(uid->serial)))
			return pos;
	}
	return NULL;
}

static struct alias_lcu *_find_lcu(struct alias_server *server,
				   struct dasd_uid *uid)
{
	struct alias_lcu *pos;

	list_for_each_entry(pos, &server->lculist, lcu) {
		if (pos->uid.ssid == uid->ssid)
			return pos;
	}
	return NULL;
}
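
/*
 * Find the PAV group that a device with the given uid belongs to.
 * For HyperPAV there is only one group per lcu; for base PAV the group
 * is identified by the base unit address.
 */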
static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
					   struct dasd_uid *uid)
{
	struct alias_pav_group *pos;
	__u8 search_unit_addr;

	/* for hyper pav there is only one group */
	if (lcu->pav == HYPER_PAV) {
		if (list_empty(&lcu->grouplist))
			return NULL;
		else
			return list_first_entry(&lcu->grouplist,
						struct alias_pav_group, group);
	}

	/* for base pav we have to find the group that matches the base */
	if (uid->type == UA_BASE_DEVICE)
		search_unit_addr = uid->real_unit_addr;
	else
		search_unit_addr = uid->base_unit_addr;
	list_for_each_entry(pos, &lcu->grouplist, group) {
		if (pos->uid.base_unit_addr == search_unit_addr)
			return pos;
	}
	return NULL;
}

static struct alias_server *_allocate_server(struct dasd_uid *uid)
{
	struct alias_server *server;

	server = kzalloc(sizeof(*server), GFP_KERNEL);
	if (!server)
		return ERR_PTR(-ENOMEM);
	memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
	memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
	INIT_LIST_HEAD(&server->server);
	INIT_LIST_HEAD(&server->lculist);
	return server;
}

static void _free_server(struct alias_server *server)
{
	kfree(server);
}
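
/*
 * Allocate an lcu structure together with the buffers it needs later:
 * the unit address configuration data and the CCW request used to
 * reset a summary unit check. Pre-allocating the DMA buffers here
 * presumably means the summary unit check path does not have to
 * allocate memory while recovering.
 */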
static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
{
	struct alias_lcu *lcu;

	lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
	if (!lcu)
		return ERR_PTR(-ENOMEM);
	lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
	if (!lcu->uac)
		goto out_err1;
	lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr)
		goto out_err2;
	lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
				       GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->cpaddr)
		goto out_err3;
	lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->data)
		goto out_err4;

	memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
	memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
	lcu->uid.ssid = uid->ssid;
	lcu->pav = NO_PAV;
	lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
	INIT_LIST_HEAD(&lcu->lcu);
	INIT_LIST_HEAD(&lcu->inactive_devices);
	INIT_LIST_HEAD(&lcu->active_devices);
	INIT_LIST_HEAD(&lcu->grouplist);
	INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
	INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
	spin_lock_init(&lcu->lock);
	return lcu;

out_err4:
	kfree(lcu->rsu_cqr->cpaddr);
out_err3:
	kfree(lcu->rsu_cqr);
out_err2:
	kfree(lcu->uac);
out_err1:
	kfree(lcu);
	return ERR_PTR(-ENOMEM);
}

static void _free_lcu(struct alias_lcu *lcu)
{
	kfree(lcu->rsu_cqr->data);
	kfree(lcu->rsu_cqr->cpaddr);
	kfree(lcu->rsu_cqr);
	kfree(lcu->uac);
	kfree(lcu);
}

/*
 * This is the function that will allocate all the server and lcu data,
 * so this function must be called first for a new device.
 * If the return value is 1, the lcu was already known before; if it
 * is 0, this is a new lcu.
 * A negative return code indicates that something went wrong (e.g. -ENOMEM).
 */
int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	unsigned long flags;
	struct alias_server *server, *newserver;
	struct alias_lcu *lcu, *newlcu;
	int is_lcu_known;
	struct dasd_uid *uid;

	private = (struct dasd_eckd_private *) device->private;
	uid = &private->uid;
	spin_lock_irqsave(&aliastree.lock, flags);
	is_lcu_known = 1;
	server = _find_server(uid);
	if (!server) {
		spin_unlock_irqrestore(&aliastree.lock, flags);
		newserver = _allocate_server(uid);
		if (IS_ERR(newserver))
			return PTR_ERR(newserver);
		spin_lock_irqsave(&aliastree.lock, flags);
		server = _find_server(uid);
		if (!server) {
			list_add(&newserver->server, &aliastree.serverlist);
			server = newserver;
			is_lcu_known = 0;
		} else {
			/* someone was faster */
			_free_server(newserver);
		}
	}

	lcu = _find_lcu(server, uid);
	if (!lcu) {
		spin_unlock_irqrestore(&aliastree.lock, flags);
		newlcu = _allocate_lcu(uid);
		if (IS_ERR(newlcu))
			return PTR_ERR(newlcu);
		spin_lock_irqsave(&aliastree.lock, flags);
		lcu = _find_lcu(server, uid);
		if (!lcu) {
			list_add(&newlcu->lcu, &server->lculist);
			lcu = newlcu;
			is_lcu_known = 0;
		} else {
			/* someone was faster */
			_free_lcu(newlcu);
		}
		is_lcu_known = 0;
	}
	spin_lock(&lcu->lock);
	list_add(&device->alias_list, &lcu->inactive_devices);
	private->lcu = lcu;
	spin_unlock(&lcu->lock);
	spin_unlock_irqrestore(&aliastree.lock, flags);

	return is_lcu_known;
}

/*
 * This function removes a device from the scope of alias management.
 * The complicated part is to make sure that it is not in use by
 * any of the workers. If necessary, cancel the work.
 */
void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	unsigned long flags;
	struct alias_lcu *lcu;
	struct alias_server *server;
	int was_pending;

	private = (struct dasd_eckd_private *) device->private;
	lcu = private->lcu;
	spin_lock_irqsave(&lcu->lock, flags);
	list_del_init(&device->alias_list);
	/* make sure that the workers don't use this device */
	if (device == lcu->suc_data.device) {
		spin_unlock_irqrestore(&lcu->lock, flags);
		cancel_work_sync(&lcu->suc_data.worker);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->suc_data.device)
			lcu->suc_data.device = NULL;
	}
	was_pending = 0;
	if (device == lcu->ruac_data.device) {
		spin_unlock_irqrestore(&lcu->lock, flags);
		was_pending = 1;
		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->ruac_data.device)
			lcu->ruac_data.device = NULL;
	}
	private->lcu = NULL;
	spin_unlock_irqrestore(&lcu->lock, flags);

	spin_lock_irqsave(&aliastree.lock, flags);
	spin_lock(&lcu->lock);
	if (list_empty(&lcu->grouplist) &&
	    list_empty(&lcu->active_devices) &&
	    list_empty(&lcu->inactive_devices)) {
		list_del(&lcu->lcu);
		spin_unlock(&lcu->lock);
		_free_lcu(lcu);
		lcu = NULL;
	} else {
		if (was_pending)
			_schedule_lcu_update(lcu, NULL);
		spin_unlock(&lcu->lock);
	}
	server = _find_server(&private->uid);
	if (server && list_empty(&server->lculist)) {
		list_del(&server->server);
		_free_server(server);
	}
	spin_unlock_irqrestore(&aliastree.lock, flags);
}

/*
 * This function assumes that the unit address configuration stored
 * in the lcu is up to date and will update the device uid before
 * adding it to a pav group.
 */
static int _add_device_to_lcu(struct alias_lcu *lcu,
			      struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct alias_pav_group *group;
	struct dasd_uid *uid;

	private = (struct dasd_eckd_private *) device->private;
	uid = &private->uid;
	uid->type = lcu->uac->unit[uid->real_unit_addr].ua_type;
	uid->base_unit_addr = lcu->uac->unit[uid->real_unit_addr].base_ua;
	dasd_set_uid(device->cdev, &private->uid);

	/* if we have no PAV anyway, we don't need to bother with PAV groups */
	if (lcu->pav == NO_PAV) {
		list_move(&device->alias_list, &lcu->active_devices);
		return 0;
	}

	group = _find_group(lcu, uid);
	if (!group) {
		group = kzalloc(sizeof(*group), GFP_ATOMIC);
		if (!group)
			return -ENOMEM;
		memcpy(group->uid.vendor, uid->vendor, sizeof(uid->vendor));
		memcpy(group->uid.serial, uid->serial, sizeof(uid->serial));
		group->uid.ssid = uid->ssid;
		if (uid->type == UA_BASE_DEVICE)
			group->uid.base_unit_addr = uid->real_unit_addr;
		else
			group->uid.base_unit_addr = uid->base_unit_addr;
		INIT_LIST_HEAD(&group->group);
		INIT_LIST_HEAD(&group->baselist);
		INIT_LIST_HEAD(&group->aliaslist);
		list_add(&group->group, &lcu->grouplist);
	}
	if (uid->type == UA_BASE_DEVICE)
		list_move(&device->alias_list, &group->baselist);
	else
		list_move(&device->alias_list, &group->aliaslist);
	private->pavgroup = group;
	return 0;
}
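
/*
 * Take a device out of its PAV group and put it back on the lcu's
 * inactive list. The group itself is freed once its last base and
 * alias device is gone.
 */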
static void _remove_device_from_lcu(struct alias_lcu *lcu,
				    struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct alias_pav_group *group;

	private = (struct dasd_eckd_private *) device->private;
	list_move(&device->alias_list, &lcu->inactive_devices);
	group = private->pavgroup;
	if (!group)
		return;
	private->pavgroup = NULL;
	if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
		list_del(&group->group);
		kfree(group);
		return;
	}
	if (group->next == device)
		group->next = NULL;
}
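
/*
 * Build and run a Perform Subsystem Function / Read Subsystem Data
 * request that reads the unit address configuration of the lcu into
 * lcu->uac. Runs synchronously on the given device; called from the
 * lcu update path.
 */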
static int read_unit_address_configuration(struct dasd_device *device,
					   struct alias_lcu *lcu)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;
	unsigned long flags;

	cqr = dasd_kmalloc_request("ECKD",
				   1 /* PSF */ + 1 /* RSSD */,
				   sizeof(struct dasd_psf_prssd_data),
				   device);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 10;
	cqr->expires = 20 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x0e; /* Read unit address configuration */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - unit address configuration */
	memset(lcu->uac, 0, sizeof(*(lcu->uac)));
	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*(lcu->uac));
	ccw->cda = (__u32)(addr_t) lcu->uac;

	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	/* need to unset flag here to detect race with summary unit check */
	spin_lock_irqsave(&lcu->lock, flags);
	lcu->flags &= ~NEED_UAC_UPDATE;
	spin_unlock_irqrestore(&lcu->lock, flags);

	do {
		rc = dasd_sleep_on(cqr);
	} while (rc && (cqr->retries > 0));
	if (rc) {
		spin_lock_irqsave(&lcu->lock, flags);
		lcu->flags |= NEED_UAC_UPDATE;
		spin_unlock_irqrestore(&lcu->lock, flags);
	}
	dasd_kfree_request(cqr, cqr->memdev);
	return rc;
}
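
/*
 * Re-read the unit address configuration for the lcu and rebuild the
 * PAV groups from scratch: all devices are first moved back to the
 * active list, then sorted into groups again according to the new data.
 */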
static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
{
	unsigned long flags;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *device, *tempdev;
	int i, rc;
	struct dasd_eckd_private *private;

	spin_lock_irqsave(&lcu->lock, flags);
	list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
		list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = (struct dasd_eckd_private *) device->private;
			private->pavgroup = NULL;
		}
		list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = (struct dasd_eckd_private *) device->private;
			private->pavgroup = NULL;
		}
		list_del(&pavgroup->group);
		kfree(pavgroup);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);

	rc = read_unit_address_configuration(refdev, lcu);
	if (rc)
		return rc;

	spin_lock_irqsave(&lcu->lock, flags);
	lcu->pav = NO_PAV;
	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
		switch (lcu->uac->unit[i].ua_type) {
		case UA_BASE_PAV_ALIAS:
			lcu->pav = BASE_PAV;
			break;
		case UA_HYPER_PAV_ALIAS:
			lcu->pav = HYPER_PAV;
			break;
		}
		if (lcu->pav != NO_PAV)
			break;
	}

	list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
				 alias_list) {
		_add_device_to_lcu(lcu, device);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
	return 0;
}
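
/*
 * Worker behind ruac_data.dwork: performs the lcu update and
 * reschedules itself if the update failed or further update requests
 * came in while it was running.
 */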
static void lcu_update_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct read_uac_work_data *ruac_data;
	struct dasd_device *device;
	unsigned long flags;
	int rc;

	ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
	lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
	device = ruac_data->device;
	rc = _lcu_update(device, lcu);
	/*
	 * Need to check flags again, as there could have been another
	 * prepare_update or a new device while we were still
	 * processing the data
	 */
	spin_lock_irqsave(&lcu->lock, flags);
	if (rc || (lcu->flags & NEED_UAC_UPDATE)) {
		DEV_MESSAGE(KERN_WARNING, device, "could not update"
			    " alias data in lcu (rc = %d), retry later", rc);
		schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
	} else {
		lcu->ruac_data.device = NULL;
		lcu->flags &= ~UPDATE_PENDING;
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}
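
/*
 * Schedule the delayed update worker. A device is needed to run the
 * update request on; if the given device is not usable, pick any
 * device that is still connected to the lcu. Called with lcu->lock held.
 */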
static int _schedule_lcu_update(struct alias_lcu *lcu,
				struct dasd_device *device)
{
	struct dasd_device *usedev = NULL;
	struct alias_pav_group *group;

	lcu->flags |= NEED_UAC_UPDATE;
	if (lcu->ruac_data.device) {
		/* already scheduled or running */
		return 0;
	}
	if (device && !list_empty(&device->alias_list))
		usedev = device;

	if (!usedev && !list_empty(&lcu->grouplist)) {
		group = list_first_entry(&lcu->grouplist,
					 struct alias_pav_group, group);
		if (!list_empty(&group->baselist))
			usedev = list_first_entry(&group->baselist,
						  struct dasd_device,
						  alias_list);
		else if (!list_empty(&group->aliaslist))
			usedev = list_first_entry(&group->aliaslist,
						  struct dasd_device,
						  alias_list);
	}
	if (!usedev && !list_empty(&lcu->active_devices)) {
		usedev = list_first_entry(&lcu->active_devices,
					  struct dasd_device, alias_list);
	}
	/*
	 * if we haven't found a proper device yet, give up for now, the next
	 * device that will be set active will trigger an lcu update
	 */
	if (!usedev)
		return -EINVAL;
	lcu->ruac_data.device = usedev;
	schedule_delayed_work(&lcu->ruac_data.dwork, 0);
	return 0;
}
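
/*
 * Mark a device as 'ready for service': sort it into its PAV group if
 * the lcu data is current, otherwise park it on the active list and
 * schedule an lcu update.
 */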
int dasd_alias_add_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct alias_lcu *lcu;
	unsigned long flags;
	int rc;

	private = (struct dasd_eckd_private *) device->private;
	lcu = private->lcu;
	rc = 0;
	spin_lock_irqsave(&lcu->lock, flags);
	if (!(lcu->flags & UPDATE_PENDING)) {
		rc = _add_device_to_lcu(lcu, device);
		if (rc)
			lcu->flags |= UPDATE_PENDING;
	}
	if (lcu->flags & UPDATE_PENDING) {
		list_move(&device->alias_list, &lcu->active_devices);
		_schedule_lcu_update(lcu, device);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
	return rc;
}

int dasd_alias_remove_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct alias_lcu *lcu;
	unsigned long flags;

	private = (struct dasd_eckd_private *) device->private;
	lcu = private->lcu;
	spin_lock_irqsave(&lcu->lock, flags);
	_remove_device_from_lcu(lcu, device);
	spin_unlock_irqrestore(&lcu->lock, flags);
	return 0;
}
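
/*
 * Called in the I/O path: given a base device, return an alias device
 * from the same PAV group that the request may be started on instead,
 * or NULL if the base device itself should be used. group->next serves
 * as a simple round-robin cursor over the alias list.
 *
 * A caller would use it roughly like this (a sketch only, not the
 * actual code in the eckd request build path):
 *
 *	startdev = dasd_alias_get_start_dev(base);
 *	if (!startdev)
 *		startdev = base;
 *	cqr->startdev = startdev;
 */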
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{
	struct dasd_device *alias_device;
	struct alias_pav_group *group;
	struct alias_lcu *lcu;
	struct dasd_eckd_private *private, *alias_priv;
	unsigned long flags;

	private = (struct dasd_eckd_private *) base_device->private;
	group = private->pavgroup;
	lcu = private->lcu;
	if (!group || !lcu)
		return NULL;
	if (lcu->pav == NO_PAV ||
	    lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
		return NULL;

	spin_lock_irqsave(&lcu->lock, flags);
	alias_device = group->next;
	if (!alias_device) {
		if (list_empty(&group->aliaslist)) {
			spin_unlock_irqrestore(&lcu->lock, flags);
			return NULL;
		} else {
			alias_device = list_first_entry(&group->aliaslist,
							struct dasd_device,
							alias_list);
		}
	}
	if (list_is_last(&alias_device->alias_list, &group->aliaslist))
		group->next = list_first_entry(&group->aliaslist,
					       struct dasd_device, alias_list);
	else
		group->next = list_first_entry(&alias_device->alias_list,
					       struct dasd_device, alias_list);
	spin_unlock_irqrestore(&lcu->lock, flags);
	alias_priv = (struct dasd_eckd_private *) alias_device->private;
	if ((alias_priv->count < private->count) && !alias_device->stopped)
		return alias_device;
	else
		return NULL;
}

/*
 * Summary unit check handling depends on the way alias devices
 * are handled so it is done here rather than in dasd_eckd.c
 */
static int reset_summary_unit_check(struct alias_lcu *lcu,
				    struct dasd_device *device,
				    char reason)
{
	struct dasd_ccw_req *cqr;
	int rc = 0;

	cqr = lcu->rsu_cqr;
	strncpy((char *) &cqr->magic, "ECKD", 4);
	ASCEBC((char *) &cqr->magic, 4);
	cqr->cpaddr->cmd_code = DASD_ECKD_CCW_RSCK;
	cqr->cpaddr->flags = 0;
	cqr->cpaddr->count = 16;
	cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
	((char *)cqr->data)[0] = reason;

	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 255;	/* set retry counter to enable basic ERP */
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 5 * HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}
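
/*
 * Kick the block layer and device tasklets of all base devices on the
 * lcu so that queued requests are started again after the summary unit
 * check has been reset.
 */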
static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device;
	struct dasd_eckd_private *private;

	/* active and inactive list can contain alias as well as base devices */
	list_for_each_entry(device, &lcu->active_devices, alias_list) {
		private = (struct dasd_eckd_private *) device->private;
		if (private->uid.type != UA_BASE_DEVICE)
			continue;
		dasd_schedule_block_bh(device->block);
		dasd_schedule_device_bh(device);
	}
	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
		private = (struct dasd_eckd_private *) device->private;
		if (private->uid.type != UA_BASE_DEVICE)
			continue;
		dasd_schedule_block_bh(device->block);
		dasd_schedule_device_bh(device);
	}
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
			dasd_schedule_block_bh(device->block);
			dasd_schedule_device_bh(device);
		}
	}
}

static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device, *temp;
	struct dasd_eckd_private *private;
	int rc;
	unsigned long flags;
	LIST_HEAD(active);

	/*
	 * Problem here is that dasd_flush_device_queue may wait
	 * for termination of a request to complete. We can't keep
	 * the lcu lock during that time, so we must assume that
	 * the lists may have changed.
	 * Idea: first gather all active alias devices in a separate list,
	 * then flush the first element of this list unlocked, and afterwards
	 * check if it is still on the list before moving it to the
	 * active_devices list.
	 */
	spin_lock_irqsave(&lcu->lock, flags);
	list_for_each_entry_safe(device, temp, &lcu->active_devices,
				 alias_list) {
		private = (struct dasd_eckd_private *) device->private;
		if (private->uid.type == UA_BASE_DEVICE)
			continue;
		list_move(&device->alias_list, &active);
	}

	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_splice_init(&pavgroup->aliaslist, &active);
	}
	while (!list_empty(&active)) {
		device = list_first_entry(&active, struct dasd_device,
					  alias_list);
		spin_unlock_irqrestore(&lcu->lock, flags);
		rc = dasd_flush_device_queue(device);
		spin_lock_irqsave(&lcu->lock, flags);
		/*
		 * only move device around if it wasn't moved away while we
		 * were waiting for the flush
		 */
		if (device == list_first_entry(&active,
					       struct dasd_device, alias_list))
			list_move(&device->alias_list, &lcu->active_devices);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}

static void __stop_device_on_lcu(struct dasd_device *device,
				 struct dasd_device *pos)
{
	/* If pos == device then device is already locked! */
	if (pos == device) {
		pos->stopped |= DASD_STOPPED_SU;
		return;
	}
	spin_lock(get_ccwdev_lock(pos->cdev));
	pos->stopped |= DASD_STOPPED_SU;
	spin_unlock(get_ccwdev_lock(pos->cdev));
}

/*
 * This function is called in interrupt context, so the
 * cdev lock for device is already locked!
 */
static void _stop_all_devices_on_lcu(struct alias_lcu *lcu,
				     struct dasd_device *device)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *pos;

	list_for_each_entry(pos, &lcu->active_devices, alias_list)
		__stop_device_on_lcu(device, pos);
	list_for_each_entry(pos, &lcu->inactive_devices, alias_list)
		__stop_device_on_lcu(device, pos);
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(pos, &pavgroup->baselist, alias_list)
			__stop_device_on_lcu(device, pos);
		list_for_each_entry(pos, &pavgroup->aliaslist, alias_list)
			__stop_device_on_lcu(device, pos);
	}
}
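
/*
 * Clear the DASD_STOPPED_SU bit on every device connected to the lcu,
 * taking each device's cdev lock in turn. Called from the summary unit
 * check worker after the check has been reset.
 */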
static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device;
	unsigned long flags;

	list_for_each_entry(device, &lcu->active_devices, alias_list) {
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		device->stopped &= ~DASD_STOPPED_SU;
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	}

	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		device->stopped &= ~DASD_STOPPED_SU;
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	}

	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
			spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
			device->stopped &= ~DASD_STOPPED_SU;
			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
					       flags);
		}
		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
			spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
			device->stopped &= ~DASD_STOPPED_SU;
			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
					       flags);
		}
	}
}
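
/*
 * Worker that performs the actual summary unit check recovery:
 * flush all alias devices, reset the summary unit check on the device
 * that reported it, unstop and restart the devices, and finally
 * schedule an lcu update to read the new alias configuration.
 */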
static void summary_unit_check_handling_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct summary_unit_check_work_data *suc_data;
	unsigned long flags;
	struct dasd_device *device;

	suc_data = container_of(work, struct summary_unit_check_work_data,
				worker);
	lcu = container_of(suc_data, struct alias_lcu, suc_data);
	device = suc_data->device;

	/* 1. flush alias devices */
	flush_all_alias_devices_on_lcu(lcu);

	/* 2. reset summary unit check */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	device->stopped &= ~(DASD_STOPPED_SU | DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	reset_summary_unit_check(lcu, device, suc_data->reason);

	spin_lock_irqsave(&lcu->lock, flags);
	_unstop_all_devices_on_lcu(lcu);
	_restart_all_base_devices_on_lcu(lcu);

	/* 3. read new alias configuration */
	_schedule_lcu_update(lcu, device);
	lcu->suc_data.device = NULL;
	spin_unlock_irqrestore(&lcu->lock, flags);
}

/*
 * note: this will be called from int handler context (cdev locked)
 */
void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
					  struct irb *irb)
{
	struct alias_lcu *lcu;
	char reason;
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) device->private;

	reason = irb->ecw[8];
	DEV_MESSAGE(KERN_WARNING, device, "%s %x",
		    "eckd handle summary unit check: reason", reason);

	lcu = private->lcu;
	if (!lcu) {
		DEV_MESSAGE(KERN_WARNING, device, "%s",
			    "device not ready to handle summary"
			    " unit check (no lcu structure)");
		return;
	}
	spin_lock(&lcu->lock);
	_stop_all_devices_on_lcu(lcu, device);
	/* prepare for lcu_update */
	private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
	/*
	 * If this device is about to be removed just return and wait for
	 * the next interrupt on a different device
	 */
	if (list_empty(&device->alias_list)) {
		DEV_MESSAGE(KERN_WARNING, device, "%s",
			    "device is in offline processing,"
			    " don't do summary unit check handling");
		spin_unlock(&lcu->lock);
		return;
	}
	if (lcu->suc_data.device) {
		/* already scheduled or running */
		DEV_MESSAGE(KERN_WARNING, device, "%s",
			    "previous instance of summary unit check worker"
			    " still pending");
		spin_unlock(&lcu->lock);
		return;
	}
	lcu->suc_data.reason = reason;
	lcu->suc_data.device = device;
	spin_unlock(&lcu->lock);
	schedule_work(&lcu->suc_data.worker);
}