/* dasd_alias.c */
  1. /*
  2. * PAV alias management for the DASD ECKD discipline
  3. *
  4. * Copyright IBM Corporation, 2007
  5. * Author(s): Stefan Weinhuber <wein@de.ibm.com>
  6. */
  7. #define KMSG_COMPONENT "dasd-eckd"
  8. #include <linux/list.h>
  9. #include <linux/slab.h>
  10. #include <asm/ebcdic.h>
  11. #include "dasd_int.h"
  12. #include "dasd_eckd.h"
  13. #ifdef PRINTK_HEADER
  14. #undef PRINTK_HEADER
  15. #endif /* PRINTK_HEADER */
  16. #define PRINTK_HEADER "dasd(eckd):"
  17. /*
  18. * General concept of alias management:
  19. * - PAV and DASD alias management is specific to the eckd discipline.
  20. * - A device is connected to an lcu as long as the device exists.
* dasd_alias_make_device_known_to_lcu will be called when the
  22. * device is checked by the eckd discipline and
  23. * dasd_alias_disconnect_device_from_lcu will be called
  24. * before the device is deleted.
  25. * - The dasd_alias_add_device / dasd_alias_remove_device
  26. * functions mark the point when a device is 'ready for service'.
  27. * - A summary unit check is a rare occasion, but it is mandatory to
  28. * support it. It requires some complex recovery actions before the
  29. * devices can be used again (see dasd_alias_handle_summary_unit_check).
  30. * - dasd_alias_get_start_dev will find an alias device that can be used
  31. * instead of the base device and does some (very simple) load balancing.
  32. * This is the function that gets called for each I/O, so when improving
  33. * something, this function should get faster or better, the rest has just
  34. * to be correct.
  35. */
  36. static void summary_unit_check_handling_work(struct work_struct *);
  37. static void lcu_update_work(struct work_struct *);
  38. static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);
  39. static struct alias_root aliastree = {
  40. .serverlist = LIST_HEAD_INIT(aliastree.serverlist),
  41. .lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
  42. };
  43. static struct alias_server *_find_server(struct dasd_uid *uid)
  44. {
  45. struct alias_server *pos;
  46. list_for_each_entry(pos, &aliastree.serverlist, server) {
  47. if (!strncmp(pos->uid.vendor, uid->vendor,
  48. sizeof(uid->vendor))
  49. && !strncmp(pos->uid.serial, uid->serial,
  50. sizeof(uid->serial)))
  51. return pos;
  52. };
  53. return NULL;
  54. }
  55. static struct alias_lcu *_find_lcu(struct alias_server *server,
  56. struct dasd_uid *uid)
  57. {
  58. struct alias_lcu *pos;
  59. list_for_each_entry(pos, &server->lculist, lcu) {
  60. if (pos->uid.ssid == uid->ssid)
  61. return pos;
  62. };
  63. return NULL;
  64. }
  65. static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
  66. struct dasd_uid *uid)
  67. {
  68. struct alias_pav_group *pos;
  69. __u8 search_unit_addr;
  70. /* for hyper pav there is only one group */
  71. if (lcu->pav == HYPER_PAV) {
  72. if (list_empty(&lcu->grouplist))
  73. return NULL;
  74. else
  75. return list_first_entry(&lcu->grouplist,
  76. struct alias_pav_group, group);
  77. }
  78. /* for base pav we have to find the group that matches the base */
  79. if (uid->type == UA_BASE_DEVICE)
  80. search_unit_addr = uid->real_unit_addr;
  81. else
  82. search_unit_addr = uid->base_unit_addr;
  83. list_for_each_entry(pos, &lcu->grouplist, group) {
  84. if (pos->uid.base_unit_addr == search_unit_addr &&
  85. !strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
  86. return pos;
  87. };
  88. return NULL;
  89. }
  90. static struct alias_server *_allocate_server(struct dasd_uid *uid)
  91. {
  92. struct alias_server *server;
  93. server = kzalloc(sizeof(*server), GFP_KERNEL);
  94. if (!server)
  95. return ERR_PTR(-ENOMEM);
  96. memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
  97. memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
  98. INIT_LIST_HEAD(&server->server);
  99. INIT_LIST_HEAD(&server->lculist);
  100. return server;
  101. }
/* Release an alias_server allocated by _allocate_server(). */
static void _free_server(struct alias_server *server)
{
	kfree(server);
}
/*
 * Allocate an alias_lcu together with the DMA-capable buffers it owns:
 * the unit address configuration record (uac) and a pre-built "reset
 * summary unit check" request (rsu_cqr) consisting of one CCW and a
 * 16-byte data area. The rsu_cqr is allocated up front so the summary
 * unit check handler never has to allocate memory itself.
 * A fresh lcu starts with NEED_UAC_UPDATE | UPDATE_PENDING set, i.e.
 * its unit address configuration still has to be read.
 * Returns ERR_PTR(-ENOMEM) if any allocation fails.
 */
static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
{
	struct alias_lcu *lcu;

	lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
	if (!lcu)
		return ERR_PTR(-ENOMEM);
	lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
	if (!lcu->uac)
		goto out_err1;
	lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr)
		goto out_err2;
	lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
				       GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->cpaddr)
		goto out_err3;
	lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->data)
		goto out_err4;

	memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
	memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
	lcu->uid.ssid = uid->ssid;
	lcu->pav = NO_PAV;
	/* the uac of a brand-new lcu must be read before it is usable */
	lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
	INIT_LIST_HEAD(&lcu->lcu);
	INIT_LIST_HEAD(&lcu->inactive_devices);
	INIT_LIST_HEAD(&lcu->active_devices);
	INIT_LIST_HEAD(&lcu->grouplist);
	INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
	INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
	spin_lock_init(&lcu->lock);
	init_completion(&lcu->lcu_setup);
	return lcu;

	/* unwind in reverse allocation order */
out_err4:
	kfree(lcu->rsu_cqr->cpaddr);
out_err3:
	kfree(lcu->rsu_cqr);
out_err2:
	kfree(lcu->uac);
out_err1:
	kfree(lcu);
	return ERR_PTR(-ENOMEM);
}
/*
 * Counterpart to _allocate_lcu(): releases the rsu_cqr parts, the uac
 * buffer and finally the lcu itself.
 */
static void _free_lcu(struct alias_lcu *lcu)
{
	kfree(lcu->rsu_cqr->data);
	kfree(lcu->rsu_cqr->cpaddr);
	kfree(lcu->rsu_cqr);
	kfree(lcu->uac);
	kfree(lcu);
}
  157. /*
  158. * This is the function that will allocate all the server and lcu data,
  159. * so this function must be called first for a new device.
  160. * If the return value is 1, the lcu was already known before, if it
  161. * is 0, this is a new lcu.
  162. * Negative return code indicates that something went wrong (e.g. -ENOMEM)
  163. */
  164. int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
  165. {
  166. struct dasd_eckd_private *private;
  167. unsigned long flags;
  168. struct alias_server *server, *newserver;
  169. struct alias_lcu *lcu, *newlcu;
  170. int is_lcu_known;
  171. struct dasd_uid *uid;
  172. private = (struct dasd_eckd_private *) device->private;
  173. uid = &private->uid;
  174. spin_lock_irqsave(&aliastree.lock, flags);
  175. is_lcu_known = 1;
  176. server = _find_server(uid);
  177. if (!server) {
  178. spin_unlock_irqrestore(&aliastree.lock, flags);
  179. newserver = _allocate_server(uid);
  180. if (IS_ERR(newserver))
  181. return PTR_ERR(newserver);
  182. spin_lock_irqsave(&aliastree.lock, flags);
  183. server = _find_server(uid);
  184. if (!server) {
  185. list_add(&newserver->server, &aliastree.serverlist);
  186. server = newserver;
  187. is_lcu_known = 0;
  188. } else {
  189. /* someone was faster */
  190. _free_server(newserver);
  191. }
  192. }
  193. lcu = _find_lcu(server, uid);
  194. if (!lcu) {
  195. spin_unlock_irqrestore(&aliastree.lock, flags);
  196. newlcu = _allocate_lcu(uid);
  197. if (IS_ERR(newlcu))
  198. return PTR_ERR(newlcu);
  199. spin_lock_irqsave(&aliastree.lock, flags);
  200. lcu = _find_lcu(server, uid);
  201. if (!lcu) {
  202. list_add(&newlcu->lcu, &server->lculist);
  203. lcu = newlcu;
  204. is_lcu_known = 0;
  205. } else {
  206. /* someone was faster */
  207. _free_lcu(newlcu);
  208. }
  209. is_lcu_known = 0;
  210. }
  211. spin_lock(&lcu->lock);
  212. list_add(&device->alias_list, &lcu->inactive_devices);
  213. private->lcu = lcu;
  214. spin_unlock(&lcu->lock);
  215. spin_unlock_irqrestore(&aliastree.lock, flags);
  216. return is_lcu_known;
  217. }
  218. /*
  219. * The first device to be registered on an LCU will have to do
  220. * some additional setup steps to configure that LCU on the
  221. * storage server. All further devices should wait with their
  222. * initialization until the first device is done.
  223. * To synchronize this work, the first device will call
  224. * dasd_alias_lcu_setup_complete when it is done, and all
  225. * other devices will wait for it with dasd_alias_wait_for_lcu_setup.
  226. */
  227. void dasd_alias_lcu_setup_complete(struct dasd_device *device)
  228. {
  229. struct dasd_eckd_private *private;
  230. unsigned long flags;
  231. struct alias_server *server;
  232. struct alias_lcu *lcu;
  233. struct dasd_uid *uid;
  234. private = (struct dasd_eckd_private *) device->private;
  235. uid = &private->uid;
  236. lcu = NULL;
  237. spin_lock_irqsave(&aliastree.lock, flags);
  238. server = _find_server(uid);
  239. if (server)
  240. lcu = _find_lcu(server, uid);
  241. spin_unlock_irqrestore(&aliastree.lock, flags);
  242. if (!lcu) {
  243. DBF_EVENT_DEVID(DBF_ERR, device->cdev,
  244. "could not find lcu for %04x %02x",
  245. uid->ssid, uid->real_unit_addr);
  246. WARN_ON(1);
  247. return;
  248. }
  249. complete_all(&lcu->lcu_setup);
  250. }
  251. void dasd_alias_wait_for_lcu_setup(struct dasd_device *device)
  252. {
  253. struct dasd_eckd_private *private;
  254. unsigned long flags;
  255. struct alias_server *server;
  256. struct alias_lcu *lcu;
  257. struct dasd_uid *uid;
  258. private = (struct dasd_eckd_private *) device->private;
  259. uid = &private->uid;
  260. lcu = NULL;
  261. spin_lock_irqsave(&aliastree.lock, flags);
  262. server = _find_server(uid);
  263. if (server)
  264. lcu = _find_lcu(server, uid);
  265. spin_unlock_irqrestore(&aliastree.lock, flags);
  266. if (!lcu) {
  267. DBF_EVENT_DEVID(DBF_ERR, device->cdev,
  268. "could not find lcu for %04x %02x",
  269. uid->ssid, uid->real_unit_addr);
  270. WARN_ON(1);
  271. return;
  272. }
  273. wait_for_completion(&lcu->lcu_setup);
  274. }
/*
 * This function removes a device from the scope of alias management.
 * The complicated part is to make sure that it is not in use by
 * any of the workers. If necessary cancel the work.
 * If the device was the last one on its lcu (or server), the lcu
 * (server) node is torn down as well.
 */
void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	unsigned long flags;
	struct alias_lcu *lcu;
	struct alias_server *server;
	int was_pending;

	private = (struct dasd_eckd_private *) device->private;
	lcu = private->lcu;
	spin_lock_irqsave(&lcu->lock, flags);
	list_del_init(&device->alias_list);
	/* make sure that the workers don't use this device */
	if (device == lcu->suc_data.device) {
		/*
		 * drop the lock while cancelling (cancel_work_sync may
		 * block) and re-check the pointer afterwards, as the
		 * worker may have changed it in between
		 */
		spin_unlock_irqrestore(&lcu->lock, flags);
		cancel_work_sync(&lcu->suc_data.worker);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->suc_data.device)
			lcu->suc_data.device = NULL;
	}
	was_pending = 0;
	if (device == lcu->ruac_data.device) {
		/* same drop-cancel-recheck pattern for the update work */
		spin_unlock_irqrestore(&lcu->lock, flags);
		was_pending = 1;
		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->ruac_data.device)
			lcu->ruac_data.device = NULL;
	}
	private->lcu = NULL;
	spin_unlock_irqrestore(&lcu->lock, flags);

	/* lock order: aliastree.lock outside, lcu->lock inside */
	spin_lock_irqsave(&aliastree.lock, flags);
	spin_lock(&lcu->lock);
	if (list_empty(&lcu->grouplist) &&
	    list_empty(&lcu->active_devices) &&
	    list_empty(&lcu->inactive_devices)) {
		/* last device gone: tear down the whole lcu */
		list_del(&lcu->lcu);
		spin_unlock(&lcu->lock);
		_free_lcu(lcu);
		lcu = NULL;
	} else {
		if (was_pending)
			/* we cancelled a pending update above; re-arm it */
			_schedule_lcu_update(lcu, NULL);
		spin_unlock(&lcu->lock);
	}
	server = _find_server(&private->uid);
	if (server && list_empty(&server->lculist)) {
		/* last lcu gone: tear down the server node as well */
		list_del(&server->server);
		_free_server(server);
	}
	spin_unlock_irqrestore(&aliastree.lock, flags);
}
  331. /*
  332. * This function assumes that the unit address configuration stored
  333. * in the lcu is up to date and will update the device uid before
  334. * adding it to a pav group.
  335. */
  336. static int _add_device_to_lcu(struct alias_lcu *lcu,
  337. struct dasd_device *device)
  338. {
  339. struct dasd_eckd_private *private;
  340. struct alias_pav_group *group;
  341. struct dasd_uid *uid;
  342. private = (struct dasd_eckd_private *) device->private;
  343. uid = &private->uid;
  344. uid->type = lcu->uac->unit[uid->real_unit_addr].ua_type;
  345. uid->base_unit_addr = lcu->uac->unit[uid->real_unit_addr].base_ua;
  346. dasd_set_uid(device->cdev, &private->uid);
  347. /* if we have no PAV anyway, we don't need to bother with PAV groups */
  348. if (lcu->pav == NO_PAV) {
  349. list_move(&device->alias_list, &lcu->active_devices);
  350. return 0;
  351. }
  352. group = _find_group(lcu, uid);
  353. if (!group) {
  354. group = kzalloc(sizeof(*group), GFP_ATOMIC);
  355. if (!group)
  356. return -ENOMEM;
  357. memcpy(group->uid.vendor, uid->vendor, sizeof(uid->vendor));
  358. memcpy(group->uid.serial, uid->serial, sizeof(uid->serial));
  359. group->uid.ssid = uid->ssid;
  360. if (uid->type == UA_BASE_DEVICE)
  361. group->uid.base_unit_addr = uid->real_unit_addr;
  362. else
  363. group->uid.base_unit_addr = uid->base_unit_addr;
  364. memcpy(group->uid.vduit, uid->vduit, sizeof(uid->vduit));
  365. INIT_LIST_HEAD(&group->group);
  366. INIT_LIST_HEAD(&group->baselist);
  367. INIT_LIST_HEAD(&group->aliaslist);
  368. list_add(&group->group, &lcu->grouplist);
  369. }
  370. if (uid->type == UA_BASE_DEVICE)
  371. list_move(&device->alias_list, &group->baselist);
  372. else
  373. list_move(&device->alias_list, &group->aliaslist);
  374. private->pavgroup = group;
  375. return 0;
  376. };
  377. static void _remove_device_from_lcu(struct alias_lcu *lcu,
  378. struct dasd_device *device)
  379. {
  380. struct dasd_eckd_private *private;
  381. struct alias_pav_group *group;
  382. private = (struct dasd_eckd_private *) device->private;
  383. list_move(&device->alias_list, &lcu->inactive_devices);
  384. group = private->pavgroup;
  385. if (!group)
  386. return;
  387. private->pavgroup = NULL;
  388. if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
  389. list_del(&group->group);
  390. kfree(group);
  391. return;
  392. }
  393. if (group->next == device)
  394. group->next = NULL;
  395. };
/*
 * Issue a "Perform Subsystem Function / Read Subsystem Data" request
 * (suborder 0x0e, read unit address configuration) via @device and
 * store the result in lcu->uac.
 * NEED_UAC_UPDATE is cleared before starting the I/O so that a summary
 * unit check arriving in between sets it again; on failure the flag is
 * restored. Returns 0 on success or the dasd_sleep_on error code.
 */
static int read_unit_address_configuration(struct dasd_device *device,
					   struct alias_lcu *lcu)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;
	unsigned long flags;

	cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data)),
				   device);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 10;
	cqr->expires = 20 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x0e;	/* Read unit address configuration */
	/* all other bytes of prssdp must be zero */

	/* first CCW: PSF, command-chained to the following RSSD CCW */
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* second CCW: Read Subsystem Data - unit address configuration */
	memset(lcu->uac, 0, sizeof(*(lcu->uac)));
	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*(lcu->uac));
	ccw->cda = (__u32)(addr_t) lcu->uac;

	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	/* need to unset flag here to detect race with summary unit check */
	spin_lock_irqsave(&lcu->lock, flags);
	lcu->flags &= ~NEED_UAC_UPDATE;
	spin_unlock_irqrestore(&lcu->lock, flags);
	do {
		rc = dasd_sleep_on(cqr);
	} while (rc && (cqr->retries > 0));
	if (rc) {
		/* request failed for good: the uac is still stale */
		spin_lock_irqsave(&lcu->lock, flags);
		lcu->flags |= NEED_UAC_UPDATE;
		spin_unlock_irqrestore(&lcu->lock, flags);
	}
	dasd_kfree_request(cqr, cqr->memdev);
	return rc;
}
/*
 * Re-read the unit address configuration via @refdev and rebuild the
 * alias topology of @lcu from scratch:
 *  1. dissolve all pav groups, moving every device to the active list
 *  2. read the uac (unlocked, this performs blocking I/O)
 *  3. derive the lcu-wide PAV mode from the first alias entry found
 *  4. re-sort all active devices into (possibly new) pav groups
 * Returns 0 on success or the error from the uac read.
 */
static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
{
	unsigned long flags;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *device, *tempdev;
	int i, rc;
	struct dasd_eckd_private *private;

	spin_lock_irqsave(&lcu->lock, flags);
	/* step 1: tear down all pav groups */
	list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
		list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = (struct dasd_eckd_private *) device->private;
			private->pavgroup = NULL;
		}
		list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = (struct dasd_eckd_private *) device->private;
			private->pavgroup = NULL;
		}
		list_del(&pavgroup->group);
		kfree(pavgroup);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);

	/* step 2: blocking I/O, must run without the lock */
	rc = read_unit_address_configuration(refdev, lcu);
	if (rc)
		return rc;

	spin_lock_irqsave(&lcu->lock, flags);
	/* step 3: the first alias entry determines the PAV mode */
	lcu->pav = NO_PAV;
	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
		switch (lcu->uac->unit[i].ua_type) {
		case UA_BASE_PAV_ALIAS:
			lcu->pav = BASE_PAV;
			break;
		case UA_HYPER_PAV_ALIAS:
			lcu->pav = HYPER_PAV;
			break;
		}
		if (lcu->pav != NO_PAV)
			break;
	}
	/* step 4: regroup everything that is on the active list */
	list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
				 alias_list) {
		/*
		 * NOTE(review): a -ENOMEM from _add_device_to_lcu is
		 * silently ignored here; the device then simply stays
		 * on the active list -- confirm this is intended.
		 */
		_add_device_to_lcu(lcu, device);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
	return 0;
}
/*
 * Delayed-work handler that performs the actual lcu update.
 * If the update fails, or another update became necessary while it
 * ran, the work re-arms itself with a 30 second delay; otherwise the
 * reference device is released and UPDATE_PENDING is cleared.
 */
static void lcu_update_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct read_uac_work_data *ruac_data;
	struct dasd_device *device;
	unsigned long flags;
	int rc;

	ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
	lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
	device = ruac_data->device;
	rc = _lcu_update(device, lcu);
	/*
	 * Need to check flags again, as there could have been another
	 * prepare_update or a new device while we were still
	 * processing the data
	 */
	spin_lock_irqsave(&lcu->lock, flags);
	if (rc || (lcu->flags & NEED_UAC_UPDATE)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
			      " alias data in lcu (rc = %d), retry later", rc);
		schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
	} else {
		/* update done: release the device and clear the flag */
		lcu->ruac_data.device = NULL;
		lcu->flags &= ~UPDATE_PENDING;
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}
/*
 * Arrange for an lcu update to run (via lcu_update_work).
 * Picks a device to perform the I/O with: @device if it is still
 * connected, otherwise some base or alias device from the first pav
 * group, otherwise any device on the active list.
 * Must be called with lcu->lock held.
 * Returns 0 if the work was (or already is) scheduled, -EINVAL if no
 * usable device was found.
 */
static int _schedule_lcu_update(struct alias_lcu *lcu,
				struct dasd_device *device)
{
	struct dasd_device *usedev = NULL;
	struct alias_pav_group *group;

	lcu->flags |= NEED_UAC_UPDATE;
	if (lcu->ruac_data.device) {
		/* already scheduled or running */
		return 0;
	}
	if (device && !list_empty(&device->alias_list))
		usedev = device;
	if (!usedev && !list_empty(&lcu->grouplist)) {
		group = list_first_entry(&lcu->grouplist,
					 struct alias_pav_group, group);
		if (!list_empty(&group->baselist))
			usedev = list_first_entry(&group->baselist,
						  struct dasd_device,
						  alias_list);
		else if (!list_empty(&group->aliaslist))
			usedev = list_first_entry(&group->aliaslist,
						  struct dasd_device,
						  alias_list);
	}
	if (!usedev && !list_empty(&lcu->active_devices)) {
		usedev = list_first_entry(&lcu->active_devices,
					  struct dasd_device, alias_list);
	}
	/*
	 * if we haven't found a proper device yet, give up for now, the next
	 * device that will be set active will trigger an lcu update
	 */
	if (!usedev)
		return -EINVAL;
	lcu->ruac_data.device = usedev;
	schedule_delayed_work(&lcu->ruac_data.dwork, 0);
	return 0;
}
  562. int dasd_alias_add_device(struct dasd_device *device)
  563. {
  564. struct dasd_eckd_private *private;
  565. struct alias_lcu *lcu;
  566. unsigned long flags;
  567. int rc;
  568. private = (struct dasd_eckd_private *) device->private;
  569. lcu = private->lcu;
  570. rc = 0;
  571. spin_lock_irqsave(&lcu->lock, flags);
  572. if (!(lcu->flags & UPDATE_PENDING)) {
  573. rc = _add_device_to_lcu(lcu, device);
  574. if (rc)
  575. lcu->flags |= UPDATE_PENDING;
  576. }
  577. if (lcu->flags & UPDATE_PENDING) {
  578. list_move(&device->alias_list, &lcu->active_devices);
  579. _schedule_lcu_update(lcu, device);
  580. }
  581. spin_unlock_irqrestore(&lcu->lock, flags);
  582. return rc;
  583. }
  584. int dasd_alias_remove_device(struct dasd_device *device)
  585. {
  586. struct dasd_eckd_private *private;
  587. struct alias_lcu *lcu;
  588. unsigned long flags;
  589. private = (struct dasd_eckd_private *) device->private;
  590. lcu = private->lcu;
  591. spin_lock_irqsave(&lcu->lock, flags);
  592. _remove_device_from_lcu(lcu, device);
  593. spin_unlock_irqrestore(&lcu->lock, flags);
  594. return 0;
  595. }
/*
 * Pick an alias device for the next I/O to @base_device, doing simple
 * round-robin load balancing over the pav group's alias list.
 * Returns NULL if no PAV is in use, an update is pending, no alias
 * exists, or the chosen alias is busier than the base (or stopped) --
 * the caller then uses the base device itself.
 * This runs once per I/O, so it is kept deliberately cheap.
 */
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{
	struct dasd_device *alias_device;
	struct alias_pav_group *group;
	struct alias_lcu *lcu;
	struct dasd_eckd_private *private, *alias_priv;
	unsigned long flags;

	private = (struct dasd_eckd_private *) base_device->private;
	group = private->pavgroup;
	lcu = private->lcu;
	if (!group || !lcu)
		return NULL;
	/*
	 * NOTE(review): lcu->pav and lcu->flags are read without
	 * lcu->lock here -- presumably a benign race, since the
	 * fallback is simply to use the base device; confirm.
	 */
	if (lcu->pav == NO_PAV ||
	    lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
		return NULL;

	spin_lock_irqsave(&lcu->lock, flags);
	/* group->next is the round-robin cursor into the alias list */
	alias_device = group->next;
	if (!alias_device) {
		if (list_empty(&group->aliaslist)) {
			spin_unlock_irqrestore(&lcu->lock, flags);
			return NULL;
		} else {
			alias_device = list_first_entry(&group->aliaslist,
							struct dasd_device,
							alias_list);
		}
	}
	/* advance the cursor, wrapping at the end of the alias list */
	if (list_is_last(&alias_device->alias_list, &group->aliaslist))
		group->next = list_first_entry(&group->aliaslist,
					       struct dasd_device, alias_list);
	else
		group->next = list_first_entry(&alias_device->alias_list,
					       struct dasd_device, alias_list);
	spin_unlock_irqrestore(&lcu->lock, flags);

	alias_priv = (struct dasd_eckd_private *) alias_device->private;
	/* only hand out the alias if it is less loaded than the base */
	if ((alias_priv->count < private->count) && !alias_device->stopped)
		return alias_device;
	else
		return NULL;
}
/*
 * Summary unit check handling depends on the way alias devices
 * are handled so it is done here rather then in dasd_eckd.c
 */

/*
 * Send a "Reset Summary Unit Check" command for @lcu through @device.
 * Reuses the rsu_cqr that was pre-allocated in _allocate_lcu(), so
 * this path needs no memory allocation. @reason (taken from the sense
 * data) is passed to the storage server in the first data byte.
 * Returns the result of dasd_sleep_on_immediatly().
 */
static int reset_summary_unit_check(struct alias_lcu *lcu,
				    struct dasd_device *device,
				    char reason)
{
	struct dasd_ccw_req *cqr;
	int rc = 0;
	struct ccw1 *ccw;

	cqr = lcu->rsu_cqr;
	strncpy((char *) &cqr->magic, "ECKD", 4);
	ASCEBC((char *) &cqr->magic, 4);
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RSCK;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32)(addr_t) cqr->data;
	((char *)cqr->data)[0] = reason;

	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 255;	/* set retry counter to enable basic ERP */
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 5 * HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}
/*
 * Kick the bottom halves of every base device on @lcu so that queued
 * requests get (re)started after summary unit check recovery.
 * Called with lcu->lock held (see summary_unit_check_handling_work).
 */
static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device;
	struct dasd_eckd_private *private;

	/* active and inactive list can contain alias as well as base devices */
	list_for_each_entry(device, &lcu->active_devices, alias_list) {
		private = (struct dasd_eckd_private *) device->private;
		if (private->uid.type != UA_BASE_DEVICE)
			continue;
		dasd_schedule_block_bh(device->block);
		dasd_schedule_device_bh(device);
	}
	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
		private = (struct dasd_eckd_private *) device->private;
		if (private->uid.type != UA_BASE_DEVICE)
			continue;
		dasd_schedule_block_bh(device->block);
		dasd_schedule_device_bh(device);
	}
	/* pav group base lists contain only base devices by construction */
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
			dasd_schedule_block_bh(device->block);
			dasd_schedule_device_bh(device);
		}
	}
}
/*
 * Flush the request queues of all alias devices on @lcu and collect
 * the aliases on the lcu's active list, from where a later lcu update
 * will regroup them.
 */
static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device, *temp;
	struct dasd_eckd_private *private;
	int rc;
	unsigned long flags;
	LIST_HEAD(active);

	/*
	 * Problem here ist that dasd_flush_device_queue may wait
	 * for termination of a request to complete. We can't keep
	 * the lcu lock during that time, so we must assume that
	 * the lists may have changed.
	 * Idea: first gather all active alias devices in a separate list,
	 * then flush the first element of this list unlocked, and afterwards
	 * check if it is still on the list before moving it to the
	 * active_devices list.
	 */
	spin_lock_irqsave(&lcu->lock, flags);
	list_for_each_entry_safe(device, temp, &lcu->active_devices,
				 alias_list) {
		private = (struct dasd_eckd_private *) device->private;
		if (private->uid.type == UA_BASE_DEVICE)
			continue;
		list_move(&device->alias_list, &active);
	}
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_splice_init(&pavgroup->aliaslist, &active);
	}
	while (!list_empty(&active)) {
		device = list_first_entry(&active, struct dasd_device,
					  alias_list);
		spin_unlock_irqrestore(&lcu->lock, flags);
		/* NOTE(review): rc is ignored -- flush is best effort here */
		rc = dasd_flush_device_queue(device);
		spin_lock_irqsave(&lcu->lock, flags);
		/*
		 * only move device around if it wasn't moved away while we
		 * were waiting for the flush
		 */
		if (device == list_first_entry(&active,
					       struct dasd_device, alias_list))
			list_move(&device->alias_list, &lcu->active_devices);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}
/*
 * Set the DASD_STOPPED_SU stop bit on @pos.
 * @device is the device whose cdev lock the caller already holds
 * (the interrupted device); for it no extra locking is needed,
 * all other devices are locked here.
 */
static void __stop_device_on_lcu(struct dasd_device *device,
				 struct dasd_device *pos)
{
	/* If pos == device then device is already locked! */
	if (pos == device) {
		dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
		return;
	}
	spin_lock(get_ccwdev_lock(pos->cdev));
	dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
	spin_unlock(get_ccwdev_lock(pos->cdev));
}
/*
 * This function is called in interrupt context, so the
 * cdev lock for device is already locked!
 *
 * Stop every device on @lcu (active, inactive and all pav group
 * members) by setting DASD_STOPPED_SU on each of them.
 * Called with lcu->lock held (see dasd_alias_handle_summary_unit_check).
 */
static void _stop_all_devices_on_lcu(struct alias_lcu *lcu,
				     struct dasd_device *device)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *pos;

	list_for_each_entry(pos, &lcu->active_devices, alias_list)
		__stop_device_on_lcu(device, pos);
	list_for_each_entry(pos, &lcu->inactive_devices, alias_list)
		__stop_device_on_lcu(device, pos);
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(pos, &pavgroup->baselist, alias_list)
			__stop_device_on_lcu(device, pos);
		list_for_each_entry(pos, &pavgroup->aliaslist, alias_list)
			__stop_device_on_lcu(device, pos);
	}
}
/*
 * Clear the DASD_STOPPED_SU stop bit on every device on @lcu, taking
 * each device's cdev lock individually. Inverse of
 * _stop_all_devices_on_lcu(), run from the summary unit check worker
 * with lcu->lock held.
 */
static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device;
	unsigned long flags;

	list_for_each_entry(device, &lcu->active_devices, alias_list) {
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	}
	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	}
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
			spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
			dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
					       flags);
		}
		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
			spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
			dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
					       flags);
		}
	}
}
/*
 * Worker that performs the actual summary unit check recovery:
 *  1. flush all alias devices on the lcu
 *  2. un-stop the interrupted device and reset the summary unit check
 *     on the storage server, then un-stop and restart everything else
 *  3. schedule an lcu update to re-read the alias configuration
 * Scheduled from dasd_alias_handle_summary_unit_check(), which also
 * set suc_data.device and suc_data.reason.
 */
static void summary_unit_check_handling_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct summary_unit_check_work_data *suc_data;
	unsigned long flags;
	struct dasd_device *device;

	suc_data = container_of(work, struct summary_unit_check_work_data,
				worker);
	lcu = container_of(suc_data, struct alias_lcu, suc_data);
	device = suc_data->device;

	/* 1. flush alias devices */
	flush_all_alias_devices_on_lcu(lcu);

	/* 2. reset summary unit check */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	dasd_device_remove_stop_bits(device,
				     (DASD_STOPPED_SU | DASD_STOPPED_PENDING));
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	reset_summary_unit_check(lcu, device, suc_data->reason);

	spin_lock_irqsave(&lcu->lock, flags);
	_unstop_all_devices_on_lcu(lcu);
	_restart_all_base_devices_on_lcu(lcu);
	/* 3. read new alias configuration */
	_schedule_lcu_update(lcu, device);
	/* allow the next summary unit check to be scheduled */
	lcu->suc_data.device = NULL;
	spin_unlock_irqrestore(&lcu->lock, flags);
}
  827. /*
  828. * note: this will be called from int handler context (cdev locked)
  829. */
  830. void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
  831. struct irb *irb)
  832. {
  833. struct alias_lcu *lcu;
  834. char reason;
  835. struct dasd_eckd_private *private;
  836. char *sense;
  837. private = (struct dasd_eckd_private *) device->private;
  838. sense = dasd_get_sense(irb);
  839. if (sense) {
  840. reason = sense[8];
  841. DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
  842. "eckd handle summary unit check: reason", reason);
  843. } else {
  844. DBF_DEV_EVENT(DBF_WARNING, device, "%s",
  845. "eckd handle summary unit check:"
  846. " no reason code available");
  847. return;
  848. }
  849. lcu = private->lcu;
  850. if (!lcu) {
  851. DBF_DEV_EVENT(DBF_WARNING, device, "%s",
  852. "device not ready to handle summary"
  853. " unit check (no lcu structure)");
  854. return;
  855. }
  856. spin_lock(&lcu->lock);
  857. _stop_all_devices_on_lcu(lcu, device);
  858. /* prepare for lcu_update */
  859. private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
  860. /* If this device is about to be removed just return and wait for
  861. * the next interrupt on a different device
  862. */
  863. if (list_empty(&device->alias_list)) {
  864. DBF_DEV_EVENT(DBF_WARNING, device, "%s",
  865. "device is in offline processing,"
  866. " don't do summary unit check handling");
  867. spin_unlock(&lcu->lock);
  868. return;
  869. }
  870. if (lcu->suc_data.device) {
  871. /* already scheduled or running */
  872. DBF_DEV_EVENT(DBF_WARNING, device, "%s",
  873. "previous instance of summary unit check worker"
  874. " still pending");
  875. spin_unlock(&lcu->lock);
  876. return ;
  877. }
  878. lcu->suc_data.reason = reason;
  879. lcu->suc_data.device = device;
  880. spin_unlock(&lcu->lock);
  881. schedule_work(&lcu->suc_data.worker);
  882. };