
/*
 *  linux/drivers/s390/crypto/zcrypt_api.c
 *
 *  zcrypt 2.0.0
 *
 *  Copyright (C) 2001, 2006 IBM Corporation
 *  Author(s): Robert Burroughs
 *	       Eric Rossman (edrossma@us.ibm.com)
 *	       Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
 *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
 *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/compat.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>

#include "zcrypt_api.h"

/**
 * Module description.
 */
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor interface, "
		   "Copyright 2001, 2006 IBM Corporation");
MODULE_LICENSE("GPL");

static DEFINE_SPINLOCK(zcrypt_device_lock);
static LIST_HEAD(zcrypt_device_list);
static int zcrypt_device_count = 0;
static atomic_t zcrypt_open_count = ATOMIC_INIT(0);

/**
 * Device attributes common for all crypto devices.
 */
static ssize_t zcrypt_type_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct zcrypt_device *zdev = to_ap_dev(dev)->private;
	return snprintf(buf, PAGE_SIZE, "%s\n", zdev->type_string);
}

static DEVICE_ATTR(type, 0444, zcrypt_type_show, NULL);

static ssize_t zcrypt_online_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct zcrypt_device *zdev = to_ap_dev(dev)->private;
	return snprintf(buf, PAGE_SIZE, "%d\n", zdev->online);
}

static ssize_t zcrypt_online_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct zcrypt_device *zdev = to_ap_dev(dev)->private;
	int online;

	if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
		return -EINVAL;
	zdev->online = online;
	if (!online)
		ap_flush_queue(zdev->ap_dev);
	return count;
}

static DEVICE_ATTR(online, 0644, zcrypt_online_show, zcrypt_online_store);

static struct attribute * zcrypt_device_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_online.attr,
	NULL,
};

static struct attribute_group zcrypt_device_attr_group = {
	.attrs = zcrypt_device_attrs,
};
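
/*
 * Example (illustrative; the exact sysfs path depends on how the ap bus
 * names its devices): a card can be toggled from user space through the
 * "online" attribute created above, e.g.
 *
 *	echo 0 > /sys/bus/ap/devices/card06/online
 *
 * which clears zdev->online in zcrypt_online_store() and flushes the
 * card's queue via ap_flush_queue(), so no further requests are routed
 * to that device until the attribute is written back to 1.
 */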

/**
 * Move the device towards the head of the device list.
 * Needs to be called while holding the zcrypt device list lock.
 * Note: cards with speed_rating of 0 are kept at the end of the list.
 */
static void __zcrypt_increase_preference(struct zcrypt_device *zdev)
{
	struct zcrypt_device *tmp;
	struct list_head *l;

	if (zdev->speed_rating == 0)
		return;
	for (l = zdev->list.prev; l != &zcrypt_device_list; l = l->prev) {
		tmp = list_entry(l, struct zcrypt_device, list);
		if ((tmp->request_count + 1) * tmp->speed_rating <=
		    (zdev->request_count + 1) * zdev->speed_rating &&
		    tmp->speed_rating != 0)
			break;
	}
	if (l == zdev->list.prev)
		return;
	/* Move zdev behind l */
	list_del(&zdev->list);
	list_add(&zdev->list, l);
}

/**
 * Move the device towards the tail of the device list.
 * Needs to be called while holding the zcrypt device list lock.
 * Note: cards with speed_rating of 0 are kept at the end of the list.
 */
static void __zcrypt_decrease_preference(struct zcrypt_device *zdev)
{
	struct zcrypt_device *tmp;
	struct list_head *l;

	if (zdev->speed_rating == 0)
		return;
	for (l = zdev->list.next; l != &zcrypt_device_list; l = l->next) {
		tmp = list_entry(l, struct zcrypt_device, list);
		if ((tmp->request_count + 1) * tmp->speed_rating >
		    (zdev->request_count + 1) * zdev->speed_rating ||
		    tmp->speed_rating == 0)
			break;
	}
	if (l == zdev->list.next)
		return;
	/* Move zdev before l */
	list_del(&zdev->list);
	list_add_tail(&zdev->list, l);
}
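
/*
 * Example of the ordering criterion (illustrative numbers only; real
 * speed ratings come from the card drivers): the list is kept sorted by
 * (request_count + 1) * speed_rating, smaller products towards the head.
 * A card with speed_rating 3 and 5 requests in flight scores
 * (5 + 1) * 3 = 18, while an idle card with speed_rating 10 scores
 * (0 + 1) * 10 = 10, so the idle card ends up earlier in the list and is
 * tried first by the request routines below.
 */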

static void zcrypt_device_release(struct kref *kref)
{
	struct zcrypt_device *zdev =
		container_of(kref, struct zcrypt_device, refcount);
	zcrypt_device_free(zdev);
}

void zcrypt_device_get(struct zcrypt_device *zdev)
{
	kref_get(&zdev->refcount);
}
EXPORT_SYMBOL(zcrypt_device_get);

int zcrypt_device_put(struct zcrypt_device *zdev)
{
	return kref_put(&zdev->refcount, zcrypt_device_release);
}
EXPORT_SYMBOL(zcrypt_device_put);

struct zcrypt_device *zcrypt_device_alloc(size_t max_response_size)
{
	struct zcrypt_device *zdev;

	zdev = kzalloc(sizeof(struct zcrypt_device), GFP_KERNEL);
	if (!zdev)
		return NULL;
	zdev->reply.message = kmalloc(max_response_size, GFP_KERNEL);
	if (!zdev->reply.message)
		goto out_free;
	zdev->reply.length = max_response_size;
	spin_lock_init(&zdev->lock);
	INIT_LIST_HEAD(&zdev->list);
	return zdev;

out_free:
	kfree(zdev);
	return NULL;
}
EXPORT_SYMBOL(zcrypt_device_alloc);

void zcrypt_device_free(struct zcrypt_device *zdev)
{
	kfree(zdev->reply.message);
	kfree(zdev);
}
EXPORT_SYMBOL(zcrypt_device_free);

/**
 * Register a crypto device.
 */
int zcrypt_device_register(struct zcrypt_device *zdev)
{
	int rc;

	rc = sysfs_create_group(&zdev->ap_dev->device.kobj,
				&zcrypt_device_attr_group);
	if (rc)
		goto out;
	get_device(&zdev->ap_dev->device);
	kref_init(&zdev->refcount);
	spin_lock_bh(&zcrypt_device_lock);
	zdev->online = 1;	/* New devices are online by default. */
	list_add_tail(&zdev->list, &zcrypt_device_list);
	__zcrypt_increase_preference(zdev);
	zcrypt_device_count++;
	spin_unlock_bh(&zcrypt_device_lock);
out:
	return rc;
}
EXPORT_SYMBOL(zcrypt_device_register);

/**
 * Unregister a crypto device.
 */
void zcrypt_device_unregister(struct zcrypt_device *zdev)
{
	spin_lock_bh(&zcrypt_device_lock);
	zcrypt_device_count--;
	list_del_init(&zdev->list);
	spin_unlock_bh(&zcrypt_device_lock);
	sysfs_remove_group(&zdev->ap_dev->device.kobj,
			   &zcrypt_device_attr_group);
	put_device(&zdev->ap_dev->device);
	zcrypt_device_put(zdev);
}
EXPORT_SYMBOL(zcrypt_device_unregister);
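
/*
 * A card driver is expected to use the registration interface above
 * roughly as sketched here (a minimal, illustrative probe function;
 * sample_probe, sample_ops and SAMPLE_MAX_RESPONSE_SIZE are placeholder
 * names, and a real driver also fills in type_string, user_space_type,
 * min_mod_size, max_mod_size and speed_rating before registering):
 *
 *	static int sample_probe(struct ap_device *ap_dev)
 *	{
 *		struct zcrypt_device *zdev;
 *		int rc;
 *
 *		zdev = zcrypt_device_alloc(SAMPLE_MAX_RESPONSE_SIZE);
 *		if (!zdev)
 *			return -ENOMEM;
 *		zdev->ap_dev = ap_dev;
 *		zdev->ops = &sample_ops;
 *		ap_dev->private = zdev;
 *		rc = zcrypt_device_register(zdev);
 *		if (rc) {
 *			ap_dev->private = NULL;
 *			zcrypt_device_free(zdev);
 *		}
 *		return rc;
 *	}
 */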

/**
 * zcrypt_read is not supported beyond zcrypt 1.3.1.
 */
static ssize_t zcrypt_read(struct file *filp, char __user *buf,
			   size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/**
 * Write is not allowed.
 */
static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/**
 * Device open/close functions to count number of users.
 */
static int zcrypt_open(struct inode *inode, struct file *filp)
{
	atomic_inc(&zcrypt_open_count);
	return 0;
}

static int zcrypt_release(struct inode *inode, struct file *filp)
{
	atomic_dec(&zcrypt_open_count);
	return 0;
}

/**
 * zcrypt ioctls.
 */
static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
{
	struct zcrypt_device *zdev;
	int rc;

	if (mex->outputdatalength < mex->inputdatalength)
		return -EINVAL;
	/**
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	mex->outputdatalength = mex->inputdatalength;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		if (!zdev->online ||
		    !zdev->ops->rsa_modexpo ||
		    zdev->min_mod_size > mex->inputdatalength ||
		    zdev->max_mod_size < mex->inputdatalength)
			continue;
		zcrypt_device_get(zdev);
		get_device(&zdev->ap_dev->device);
		zdev->request_count++;
		__zcrypt_decrease_preference(zdev);
		spin_unlock_bh(&zcrypt_device_lock);
		if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
			rc = zdev->ops->rsa_modexpo(zdev, mex);
			module_put(zdev->ap_dev->drv->driver.owner);
		} else
			rc = -EAGAIN;
		spin_lock_bh(&zcrypt_device_lock);
		zdev->request_count--;
		__zcrypt_increase_preference(zdev);
		put_device(&zdev->ap_dev->device);
		zcrypt_device_put(zdev);
		spin_unlock_bh(&zcrypt_device_lock);
		return rc;
	}
	spin_unlock_bh(&zcrypt_device_lock);
	return -ENODEV;
}

static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
{
	struct zcrypt_device *zdev;
	unsigned long long z1, z2, z3;
	int rc, copied;

	if (crt->outputdatalength < crt->inputdatalength ||
	    (crt->inputdatalength & 1))
		return -EINVAL;
	/**
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	crt->outputdatalength = crt->inputdatalength;

	copied = 0;
restart:
	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		if (!zdev->online ||
		    !zdev->ops->rsa_modexpo_crt ||
		    zdev->min_mod_size > crt->inputdatalength ||
		    zdev->max_mod_size < crt->inputdatalength)
			continue;
		if (zdev->short_crt && crt->inputdatalength > 240) {
			/**
			 * Check inputdata for leading zeros for cards
			 * that can't handle np_prime, bp_key, or
			 * u_mult_inv > 128 bytes.
			 */
			if (copied == 0) {
				int len;
				spin_unlock_bh(&zcrypt_device_lock);
				/* len is max 256 / 2 - 120 = 8 */
				len = crt->inputdatalength / 2 - 120;
				z1 = z2 = z3 = 0;
				if (copy_from_user(&z1, crt->np_prime, len) ||
				    copy_from_user(&z2, crt->bp_key, len) ||
				    copy_from_user(&z3, crt->u_mult_inv, len))
					return -EFAULT;
				copied = 1;
				/**
				 * We have to restart device lookup -
				 * the device list may have changed by now.
				 */
				goto restart;
			}
			if (z1 != 0ULL || z2 != 0ULL || z3 != 0ULL)
				/* The device can't handle this request. */
				continue;
		}
		zcrypt_device_get(zdev);
		get_device(&zdev->ap_dev->device);
		zdev->request_count++;
		__zcrypt_decrease_preference(zdev);
		spin_unlock_bh(&zcrypt_device_lock);
		if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
			rc = zdev->ops->rsa_modexpo_crt(zdev, crt);
			module_put(zdev->ap_dev->drv->driver.owner);
		} else
			rc = -EAGAIN;
		spin_lock_bh(&zcrypt_device_lock);
		zdev->request_count--;
		__zcrypt_increase_preference(zdev);
		put_device(&zdev->ap_dev->device);
		zcrypt_device_put(zdev);
		spin_unlock_bh(&zcrypt_device_lock);
		return rc;
	}
	spin_unlock_bh(&zcrypt_device_lock);
	return -ENODEV;
}
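
/*
 * Worked example for the short_crt check above (sizes inferred from the
 * 128-byte limit mentioned in the comment; illustrative): with
 * inputdatalength == 256, i.e. a 2048-bit modulus, len becomes
 * 256 / 2 - 120 = 8, so the first 8 bytes of np_prime, bp_key and
 * u_mult_inv are inspected.  Only if all of them are zero does the
 * significant part of each operand fit into 128 bytes and the
 * short-format card may take the request; otherwise the loop falls
 * through to the next card in the list.
 */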

static void zcrypt_status_mask(char status[AP_DEVICES])
{
	struct zcrypt_device *zdev;

	memset(status, 0, sizeof(char) * AP_DEVICES);
	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list)
		status[AP_QID_DEVICE(zdev->ap_dev->qid)] =
			zdev->online ? zdev->user_space_type : 0x0d;
	spin_unlock_bh(&zcrypt_device_lock);
}

static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES])
{
	struct zcrypt_device *zdev;

	memset(qdepth, 0, sizeof(char) * AP_DEVICES);
	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		spin_lock(&zdev->ap_dev->lock);
		qdepth[AP_QID_DEVICE(zdev->ap_dev->qid)] =
			zdev->ap_dev->pendingq_count +
			zdev->ap_dev->requestq_count;
		spin_unlock(&zdev->ap_dev->lock);
	}
	spin_unlock_bh(&zcrypt_device_lock);
}

static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES])
{
	struct zcrypt_device *zdev;

	memset(reqcnt, 0, sizeof(int) * AP_DEVICES);
	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		spin_lock(&zdev->ap_dev->lock);
		reqcnt[AP_QID_DEVICE(zdev->ap_dev->qid)] =
			zdev->ap_dev->total_request_count;
		spin_unlock(&zdev->ap_dev->lock);
	}
	spin_unlock_bh(&zcrypt_device_lock);
}

static int zcrypt_pendingq_count(void)
{
	struct zcrypt_device *zdev;
	int pendingq_count = 0;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		spin_lock(&zdev->ap_dev->lock);
		pendingq_count += zdev->ap_dev->pendingq_count;
		spin_unlock(&zdev->ap_dev->lock);
	}
	spin_unlock_bh(&zcrypt_device_lock);
	return pendingq_count;
}

static int zcrypt_requestq_count(void)
{
	struct zcrypt_device *zdev;
	int requestq_count = 0;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		spin_lock(&zdev->ap_dev->lock);
		requestq_count += zdev->ap_dev->requestq_count;
		spin_unlock(&zdev->ap_dev->lock);
	}
	spin_unlock_bh(&zcrypt_device_lock);
	return requestq_count;
}

static int zcrypt_count_type(int type)
{
	struct zcrypt_device *zdev;
	int device_count = 0;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list)
		if (zdev->user_space_type == type)
			device_count++;
	spin_unlock_bh(&zcrypt_device_lock);
	return device_count;
}

/**
 * Old, deprecated combi status call.
 */
static long zcrypt_ica_status(struct file *filp, unsigned long arg)
{
	struct ica_z90_status *pstat;
	int ret;

	pstat = kzalloc(sizeof(*pstat), GFP_KERNEL);
	if (!pstat)
		return -ENOMEM;
	pstat->totalcount = zcrypt_device_count;
	pstat->leedslitecount = zcrypt_count_type(ZCRYPT_PCICA);
	pstat->leeds2count = zcrypt_count_type(ZCRYPT_PCICC);
	pstat->requestqWaitCount = zcrypt_requestq_count();
	pstat->pendingqWaitCount = zcrypt_pendingq_count();
	pstat->totalOpenCount = atomic_read(&zcrypt_open_count);
	pstat->cryptoDomain = ap_domain_index;
	zcrypt_status_mask(pstat->status);
	zcrypt_qdepth_mask(pstat->qdepth);
	ret = 0;
	if (copy_to_user((void __user *) arg, pstat, sizeof(*pstat)))
		ret = -EFAULT;
	kfree(pstat);
	return ret;
}

static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
				  unsigned long arg)
{
	int rc;

	switch (cmd) {
	case ICARSAMODEXPO: {
		struct ica_rsa_modexpo __user *umex = (void __user *) arg;
		struct ica_rsa_modexpo mex;
		if (copy_from_user(&mex, umex, sizeof(mex)))
			return -EFAULT;
		do {
			rc = zcrypt_rsa_modexpo(&mex);
		} while (rc == -EAGAIN);
		if (rc)
			return rc;
		return put_user(mex.outputdatalength, &umex->outputdatalength);
	}
	case ICARSACRT: {
		struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
		struct ica_rsa_modexpo_crt crt;
		if (copy_from_user(&crt, ucrt, sizeof(crt)))
			return -EFAULT;
		do {
			rc = zcrypt_rsa_crt(&crt);
		} while (rc == -EAGAIN);
		if (rc)
			return rc;
		return put_user(crt.outputdatalength, &ucrt->outputdatalength);
	}
	case Z90STAT_STATUS_MASK: {
		char status[AP_DEVICES];
		zcrypt_status_mask(status);
		if (copy_to_user((char __user *) arg, status,
				 sizeof(char) * AP_DEVICES))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_QDEPTH_MASK: {
		char qdepth[AP_DEVICES];
		zcrypt_qdepth_mask(qdepth);
		if (copy_to_user((char __user *) arg, qdepth,
				 sizeof(char) * AP_DEVICES))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_PERDEV_REQCNT: {
		int reqcnt[AP_DEVICES];
		zcrypt_perdev_reqcnt(reqcnt);
		if (copy_to_user((int __user *) arg, reqcnt,
				 sizeof(int) * AP_DEVICES))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_REQUESTQ_COUNT:
		return put_user(zcrypt_requestq_count(), (int __user *) arg);
	case Z90STAT_PENDINGQ_COUNT:
		return put_user(zcrypt_pendingq_count(), (int __user *) arg);
	case Z90STAT_TOTALOPEN_COUNT:
		return put_user(atomic_read(&zcrypt_open_count),
				(int __user *) arg);
	case Z90STAT_DOMAIN_INDEX:
		return put_user(ap_domain_index, (int __user *) arg);
	/**
	 * Deprecated ioctls. Don't add another device count ioctl,
	 * you can count them yourself in the user space with the
	 * output of the Z90STAT_STATUS_MASK ioctl.
	 */
	case ICAZ90STATUS:
		return zcrypt_ica_status(filp, arg);
	case Z90STAT_TOTALCOUNT:
		return put_user(zcrypt_device_count, (int __user *) arg);
	case Z90STAT_PCICACOUNT:
		return put_user(zcrypt_count_type(ZCRYPT_PCICA),
				(int __user *) arg);
	case Z90STAT_PCICCCOUNT:
		return put_user(zcrypt_count_type(ZCRYPT_PCICC),
				(int __user *) arg);
	case Z90STAT_PCIXCCMCL2COUNT:
		return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2),
				(int __user *) arg);
	case Z90STAT_PCIXCCMCL3COUNT:
		return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
				(int __user *) arg);
	case Z90STAT_PCIXCCCOUNT:
		return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2) +
				zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
				(int __user *) arg);
	case Z90STAT_CEX2CCOUNT:
		return put_user(zcrypt_count_type(ZCRYPT_CEX2C),
				(int __user *) arg);
	case Z90STAT_CEX2ACOUNT:
		return put_user(zcrypt_count_type(ZCRYPT_CEX2A),
				(int __user *) arg);
	default:
		/* unknown ioctl number */
		return -ENOIOCTLCMD;
	}
}
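
/*
 * User space reaches this switch through the misc device registered
 * below (normally /dev/z90crypt).  A minimal, illustrative caller for
 * ICARSAMODEXPO; buffer layout and key material are the caller's
 * responsibility, only the length checks above are applied here:
 *
 *	struct ica_rsa_modexpo mex = {
 *		.inputdata	  = input,	// inputdatalength bytes
 *		.inputdatalength  = mod_len,
 *		.outputdata	  = output,	// >= inputdatalength bytes
 *		.outputdatalength = mod_len,
 *		.b_key		  = exponent,
 *		.n_modulus	  = modulus,
 *	};
 *	int fd = open("/dev/z90crypt", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, ICARSAMODEXPO, &mex) == 0)
 *		use_result(output, mex.outputdatalength);
 *
 * (input, output, exponent, modulus, mod_len and use_result are
 * placeholders supplied by the caller.)
 */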

#ifdef CONFIG_COMPAT
/**
 * ioctl32 conversion routines
 */
struct compat_ica_rsa_modexpo {
	compat_uptr_t inputdata;
	unsigned int inputdatalength;
	compat_uptr_t outputdata;
	unsigned int outputdatalength;
	compat_uptr_t b_key;
	compat_uptr_t n_modulus;
};

static long trans_modexpo32(struct file *filp, unsigned int cmd,
			    unsigned long arg)
{
	struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo mex32;
	struct ica_rsa_modexpo mex64;
	long rc;

	if (copy_from_user(&mex32, umex32, sizeof(mex32)))
		return -EFAULT;
	mex64.inputdata = compat_ptr(mex32.inputdata);
	mex64.inputdatalength = mex32.inputdatalength;
	mex64.outputdata = compat_ptr(mex32.outputdata);
	mex64.outputdatalength = mex32.outputdatalength;
	mex64.b_key = compat_ptr(mex32.b_key);
	mex64.n_modulus = compat_ptr(mex32.n_modulus);
	do {
		rc = zcrypt_rsa_modexpo(&mex64);
	} while (rc == -EAGAIN);
	if (!rc)
		rc = put_user(mex64.outputdatalength,
			      &umex32->outputdatalength);
	return rc;
}

struct compat_ica_rsa_modexpo_crt {
	compat_uptr_t inputdata;
	unsigned int inputdatalength;
	compat_uptr_t outputdata;
	unsigned int outputdatalength;
	compat_uptr_t bp_key;
	compat_uptr_t bq_key;
	compat_uptr_t np_prime;
	compat_uptr_t nq_prime;
	compat_uptr_t u_mult_inv;
};

static long trans_modexpo_crt32(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo_crt crt32;
	struct ica_rsa_modexpo_crt crt64;
	long rc;

	if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
		return -EFAULT;
	crt64.inputdata = compat_ptr(crt32.inputdata);
	crt64.inputdatalength = crt32.inputdatalength;
	crt64.outputdata = compat_ptr(crt32.outputdata);
	crt64.outputdatalength = crt32.outputdatalength;
	crt64.bp_key = compat_ptr(crt32.bp_key);
	crt64.bq_key = compat_ptr(crt32.bq_key);
	crt64.np_prime = compat_ptr(crt32.np_prime);
	crt64.nq_prime = compat_ptr(crt32.nq_prime);
	crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
	do {
		rc = zcrypt_rsa_crt(&crt64);
	} while (rc == -EAGAIN);
	if (!rc)
		rc = put_user(crt64.outputdatalength,
			      &ucrt32->outputdatalength);
	return rc;
}

long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
			 unsigned long arg)
{
	if (cmd == ICARSAMODEXPO)
		return trans_modexpo32(filp, cmd, arg);
	if (cmd == ICARSACRT)
		return trans_modexpo_crt32(filp, cmd, arg);
	return zcrypt_unlocked_ioctl(filp, cmd, arg);
}
#endif

/**
 * Misc device file operations.
 */
static struct file_operations zcrypt_fops = {
	.owner = THIS_MODULE,
	.read = zcrypt_read,
	.write = zcrypt_write,
	.unlocked_ioctl = zcrypt_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = zcrypt_compat_ioctl,
#endif
	.open = zcrypt_open,
	.release = zcrypt_release
};

/**
 * Misc device.
 */
static struct miscdevice zcrypt_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "z90crypt",
	.fops = &zcrypt_fops,
};

/**
 * Deprecated /proc entry support.
 */
static struct proc_dir_entry *zcrypt_entry;

static inline int sprintcl(unsigned char *outaddr, unsigned char *addr,
			   unsigned int len)
{
	int hl, i;

	hl = 0;
	for (i = 0; i < len; i++)
		hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]);
	hl += sprintf(outaddr+hl, " ");
	return hl;
}

static inline int sprintrw(unsigned char *outaddr, unsigned char *addr,
			   unsigned int len)
{
	int hl, inl, c, cx;

	hl = sprintf(outaddr, " ");
	inl = 0;
	for (c = 0; c < (len / 16); c++) {
		hl += sprintcl(outaddr+hl, addr+inl, 16);
		inl += 16;
	}
	cx = len % 16;
	if (cx) {
		hl += sprintcl(outaddr+hl, addr+inl, cx);
		inl += cx;
	}
	hl += sprintf(outaddr+hl, "\n");
	return hl;
}

static inline int sprinthx(unsigned char *title, unsigned char *outaddr,
			   unsigned char *addr, unsigned int len)
{
	int hl, inl, r, rx;

	hl = sprintf(outaddr, "\n%s\n", title);
	inl = 0;
	for (r = 0; r < (len / 64); r++) {
		hl += sprintrw(outaddr+hl, addr+inl, 64);
		inl += 64;
	}
	rx = len % 64;
	if (rx) {
		hl += sprintrw(outaddr+hl, addr+inl, rx);
		inl += rx;
	}
	hl += sprintf(outaddr+hl, "\n");
	return hl;
}

static inline int sprinthx4(unsigned char *title, unsigned char *outaddr,
			    unsigned int *array, unsigned int len)
{
	int hl, r;

	hl = sprintf(outaddr, "\n%s\n", title);
	for (r = 0; r < len; r++) {
		if ((r % 8) == 0)
			hl += sprintf(outaddr+hl, " ");
		hl += sprintf(outaddr+hl, "%08X ", array[r]);
		if ((r % 8) == 7)
			hl += sprintf(outaddr+hl, "\n");
	}
	hl += sprintf(outaddr+hl, "\n");
	return hl;
}

static int zcrypt_status_read(char *resp_buff, char **start, off_t offset,
			      int count, int *eof, void *data)
{
	unsigned char *workarea;
	int len;

	len = 0;

	/* resp_buff is a page. Use the right half for a work area */
	workarea = resp_buff + 2000;
	len += sprintf(resp_buff + len, "\nzcrypt version: %d.%d.%d\n",
		       ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT);
	len += sprintf(resp_buff + len, "Cryptographic domain: %d\n",
		       ap_domain_index);
	len += sprintf(resp_buff + len, "Total device count: %d\n",
		       zcrypt_device_count);
	len += sprintf(resp_buff + len, "PCICA count: %d\n",
		       zcrypt_count_type(ZCRYPT_PCICA));
	len += sprintf(resp_buff + len, "PCICC count: %d\n",
		       zcrypt_count_type(ZCRYPT_PCICC));
	len += sprintf(resp_buff + len, "PCIXCC MCL2 count: %d\n",
		       zcrypt_count_type(ZCRYPT_PCIXCC_MCL2));
	len += sprintf(resp_buff + len, "PCIXCC MCL3 count: %d\n",
		       zcrypt_count_type(ZCRYPT_PCIXCC_MCL3));
	len += sprintf(resp_buff + len, "CEX2C count: %d\n",
		       zcrypt_count_type(ZCRYPT_CEX2C));
	len += sprintf(resp_buff + len, "CEX2A count: %d\n",
		       zcrypt_count_type(ZCRYPT_CEX2A));
	len += sprintf(resp_buff + len, "requestq count: %d\n",
		       zcrypt_requestq_count());
	len += sprintf(resp_buff + len, "pendingq count: %d\n",
		       zcrypt_pendingq_count());
	len += sprintf(resp_buff + len, "Total open handles: %d\n\n",
		       atomic_read(&zcrypt_open_count));
	zcrypt_status_mask(workarea);
	len += sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
			"4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A",
			resp_buff+len, workarea, AP_DEVICES);
	zcrypt_qdepth_mask(workarea);
	len += sprinthx("Waiting work element counts",
			resp_buff+len, workarea, AP_DEVICES);
	zcrypt_perdev_reqcnt((unsigned int *) workarea);
	len += sprinthx4("Per-device successfully completed request counts",
			 resp_buff+len, (unsigned int *) workarea, AP_DEVICES);
	*eof = 1;
	memset((void *) workarea, 0x00, AP_DEVICES * sizeof(unsigned int));
	return len;
}

static void zcrypt_disable_card(int index)
{
	struct zcrypt_device *zdev;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list)
		if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) {
			zdev->online = 0;
			ap_flush_queue(zdev->ap_dev);
			break;
		}
	spin_unlock_bh(&zcrypt_device_lock);
}

static void zcrypt_enable_card(int index)
{
	struct zcrypt_device *zdev;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list)
		if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) {
			zdev->online = 1;
			break;
		}
	spin_unlock_bh(&zcrypt_device_lock);
}

static int zcrypt_status_write(struct file *file, const char __user *buffer,
			       unsigned long count, void *data)
{
	unsigned char *lbuf, *ptr;
	unsigned long local_count;
	int j;

	if (count <= 0)
		return 0;

#define LBUFSIZE 1200UL
	lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
	if (!lbuf) {
		PRINTK("kmalloc failed!\n");
		return 0;
	}

	local_count = min(LBUFSIZE - 1, count);
	if (copy_from_user(lbuf, buffer, local_count) != 0) {
		kfree(lbuf);
		return -EFAULT;
	}
	lbuf[local_count] = '\0';

	ptr = strstr(lbuf, "Online devices");
	if (!ptr) {
		PRINTK("Unable to parse data (missing \"Online devices\")\n");
		goto out;
	}
	ptr = strstr(ptr, "\n");
	if (!ptr) {
		PRINTK("Unable to parse data (missing newline "
		       "after \"Online devices\")\n");
		goto out;
	}
	ptr++;

	if (strstr(ptr, "Waiting work element counts") == NULL) {
		PRINTK("Unable to parse data (missing "
		       "\"Waiting work element counts\")\n");
		goto out;
	}

	for (j = 0; j < 64 && *ptr; ptr++) {
		/**
		 * '0' for no device, '1' for PCICA, '2' for PCICC,
		 * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3,
		 * '5' for CEX2C and '6' for CEX2A
		 */
		if (*ptr >= '0' && *ptr <= '6')
			j++;
		else if (*ptr == 'd' || *ptr == 'D')
			zcrypt_disable_card(j++);
		else if (*ptr == 'e' || *ptr == 'E')
			zcrypt_enable_card(j++);
		else if (*ptr != ' ' && *ptr != '\t')
			break;
	}
out:
	kfree(lbuf);
	return count;
}
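
/*
 * Example of driving this parser (illustrative): read the current status
 * from /proc/driver/z90crypt, edit the "Online devices" row and write it
 * back.  In that row a digit leaves the card at that position unchanged,
 * 'd'/'D' takes it offline via zcrypt_disable_card() and 'e'/'E' puts it
 * back online via zcrypt_enable_card(); changing "1 1 1 0 ..." to
 * "1 d 1 0 ...", for instance, disables the second card.
 */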

/**
 * The module initialization code.
 */
int __init zcrypt_api_init(void)
{
	int rc;

	/* Register the request sprayer. */
	rc = misc_register(&zcrypt_misc_device);
	if (rc < 0) {
		PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n",
			zcrypt_misc_device.minor, rc);
		goto out;
	}

	/* Set up the proc file system */
	zcrypt_entry = create_proc_entry("driver/z90crypt", 0644, NULL);
	if (!zcrypt_entry) {
		PRINTK("Couldn't create z90crypt proc entry\n");
		rc = -ENOMEM;
		goto out_misc;
	}
	zcrypt_entry->nlink = 1;
	zcrypt_entry->data = NULL;
	zcrypt_entry->read_proc = zcrypt_status_read;
	zcrypt_entry->write_proc = zcrypt_status_write;

	return 0;

out_misc:
	misc_deregister(&zcrypt_misc_device);
out:
	return rc;
}

/**
 * The module termination code.
 */
void zcrypt_api_exit(void)
{
	remove_proc_entry("driver/z90crypt", NULL);
	misc_deregister(&zcrypt_misc_device);
}

#ifndef CONFIG_ZCRYPT_MONOLITHIC
module_init(zcrypt_api_init);
module_exit(zcrypt_api_exit);
#endif