/*
 * Cryptographic API.
 * Support for Nomadik hardware crypto engine.
 * Copyright (C) ST-Ericsson SA 2010
 * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson
 * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson
 * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
 * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
 * Author: Andreas Westin <andreas.westin@stericsson.com> for ST-Ericsson.
 * License terms: GNU General Public License (GPL) version 2
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/klist.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/crypto.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/bitops.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <mach/crypto-ux500.h>
#include <mach/hardware.h>
#include "hash_alg.h"

#define DEV_DBG_NAME "hashX hashX:"

static int hash_mode;
module_param(hash_mode, int, 0);
MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1");
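
/*
 * Usage sketch for the parameter above (the module file name here is an
 * assumption for illustration, not taken from this file):
 *
 *   modprobe ux500_hash hash_mode=1     # request DMA mode
 */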

/**
 * Pre-calculated empty message digests.
 */
static u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = {
        0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
        0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
        0xaf, 0xd8, 0x07, 0x09
};

static u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = {
        0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
        0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
        0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
        0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
};

/* HMAC-SHA1, no key */
static u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = {
        0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08,
        0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63,
        0x70, 0x69, 0x0e, 0x1d
};

/* HMAC-SHA256, no key */
static u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = {
        0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec,
        0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5,
        0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53,
        0xc6, 0xc7, 0x12, 0x14, 0x42, 0x92, 0xc5, 0xad
};
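
/*
 * For reference, the four constants above can be reproduced from userspace:
 *
 *   echo -n "" | sha1sum
 *   echo -n "" | sha256sum
 *   openssl dgst -sha1 -hmac "" < /dev/null
 *   openssl dgst -sha256 -hmac "" < /dev/null
 */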

/**
 * struct hash_driver_data - data specific to the driver.
 *
 * @device_list:        A list of registered devices to choose from.
 * @device_allocation:  A semaphore initialized with number of devices.
 */
struct hash_driver_data {
        struct klist            device_list;
        struct semaphore        device_allocation;
};

static struct hash_driver_data driver_data;

/* Declaration of functions */

/**
 * hash_messagepad - Pads a message and writes the nblw bits.
 * @device_data:        Structure for the hash device.
 * @message:            Last word of a message.
 * @index_bytes:        The number of bytes in the last message.
 *
 * This function manages the final part of the digest calculation, when less
 * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
 */
static void hash_messagepad(struct hash_device_data *device_data,
                const u32 *message, u8 index_bytes);

/**
 * release_hash_device - Releases a previously allocated hash device.
 * @device_data:        Structure for the hash device.
 */
static void release_hash_device(struct hash_device_data *device_data)
{
        spin_lock(&device_data->ctx_lock);
        device_data->current_ctx->device = NULL;
        device_data->current_ctx = NULL;
        spin_unlock(&device_data->ctx_lock);

        /*
         * The down_interruptible part for this semaphore is called in
         * hash_get_device_data.
         */
        up(&driver_data.device_allocation);
}

static void hash_dma_setup_channel(struct hash_device_data *device_data,
                struct device *dev)
{
        struct hash_platform_data *platform_data = dev->platform_data;

        dma_cap_zero(device_data->dma.mask);
        dma_cap_set(DMA_SLAVE, device_data->dma.mask);

        device_data->dma.cfg_mem2hash = platform_data->mem_to_engine;
        device_data->dma.chan_mem2hash =
                dma_request_channel(device_data->dma.mask,
                                platform_data->dma_filter,
                                device_data->dma.cfg_mem2hash);

        init_completion(&device_data->dma.complete);
}

static void hash_dma_callback(void *data)
{
        struct hash_ctx *ctx = (struct hash_ctx *) data;

        complete(&ctx->device->dma.complete);
}

static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
                int len, enum dma_data_direction direction)
{
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *channel = NULL;
        dma_cookie_t cookie;

        if (direction != DMA_TO_DEVICE) {
                dev_err(ctx->device->dev, "[%s] Invalid DMA direction",
                                __func__);
                return -EFAULT;
        }

        sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE);

        channel = ctx->device->dma.chan_mem2hash;
        ctx->device->dma.sg = sg;
        ctx->device->dma.sg_len = dma_map_sg(channel->device->dev,
                        ctx->device->dma.sg, ctx->device->dma.nents,
                        direction);

        if (!ctx->device->dma.sg_len) {
                dev_err(ctx->device->dev,
                                "[%s]: Could not map the sg list (TO_DEVICE)",
                                __func__);
                return -EFAULT;
        }

        dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
                        "(TO_DEVICE)", __func__);
        desc = channel->device->device_prep_slave_sg(channel,
                        ctx->device->dma.sg, ctx->device->dma.sg_len,
                        direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT, NULL);
        if (!desc) {
                dev_err(ctx->device->dev,
                                "[%s]: device_prep_slave_sg() failed!",
                                __func__);
                return -EFAULT;
        }

        desc->callback = hash_dma_callback;
        desc->callback_param = ctx;
        cookie = desc->tx_submit(desc);
        dma_async_issue_pending(channel);

        return 0;
}

static void hash_dma_done(struct hash_ctx *ctx)
{
        struct dma_chan *chan;

        chan = ctx->device->dma.chan_mem2hash;
        chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
        dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
                        ctx->device->dma.sg_len, DMA_TO_DEVICE);
}

static int hash_dma_write(struct hash_ctx *ctx,
                struct scatterlist *sg, int len)
{
        int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
        if (error) {
                dev_dbg(ctx->device->dev, "[%s]: hash_set_dma_transfer() "
                                "failed", __func__);
                return error;
        }

        return len;
}

/**
 * get_empty_message_digest - Returns a pre-calculated digest for
 * the empty message.
 * @device_data:        Structure for the hash device.
 * @zero_hash:          Buffer to return the empty message digest.
 * @zero_hash_size:     Hash size of the empty message digest.
 * @zero_digest:        True if zero_digest returned.
 */
static int get_empty_message_digest(
                struct hash_device_data *device_data,
                u8 *zero_hash, u32 *zero_hash_size, bool *zero_digest)
{
        int ret = 0;
        struct hash_ctx *ctx = device_data->current_ctx;
        *zero_digest = false;

        /*
         * Caller responsible for ctx != NULL.
         */
        if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) {
                if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
                        memcpy(zero_hash, &zero_message_hash_sha1[0],
                                        SHA1_DIGEST_SIZE);
                        *zero_hash_size = SHA1_DIGEST_SIZE;
                        *zero_digest = true;
                } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
                        memcpy(zero_hash, &zero_message_hash_sha256[0],
                                        SHA256_DIGEST_SIZE);
                        *zero_hash_size = SHA256_DIGEST_SIZE;
                        *zero_digest = true;
                } else {
                        dev_err(device_data->dev, "[%s] "
                                        "Incorrect algorithm!", __func__);
                        ret = -EINVAL;
                        goto out;
                }
        } else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) {
                if (!ctx->keylen) {
                        if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
                                memcpy(zero_hash, &zero_message_hmac_sha1[0],
                                                SHA1_DIGEST_SIZE);
                                *zero_hash_size = SHA1_DIGEST_SIZE;
                                *zero_digest = true;
                        } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
                                memcpy(zero_hash, &zero_message_hmac_sha256[0],
                                                SHA256_DIGEST_SIZE);
                                *zero_hash_size = SHA256_DIGEST_SIZE;
                                *zero_digest = true;
                        } else {
                                dev_err(device_data->dev, "[%s] "
                                                "Incorrect algorithm!",
                                                __func__);
                                ret = -EINVAL;
                                goto out;
                        }
                } else {
                        dev_dbg(device_data->dev, "[%s] Continue hash "
                                        "calculation, since hmac key is "
                                        "available", __func__);
                }
        }
out:

        return ret;
}

/**
 * hash_disable_power - Request to disable power and clock.
 * @device_data:        Structure for the hash device.
 * @save_device_state:  If true, saves the current hw state.
 *
 * This function requests that power (regulator) and clock be disabled,
 * and can also save the current hw state.
 */
static int hash_disable_power(
                struct hash_device_data *device_data,
                bool save_device_state)
{
        int ret = 0;
        struct device *dev = device_data->dev;

        spin_lock(&device_data->power_state_lock);
        if (!device_data->power_state)
                goto out;

        if (save_device_state) {
                hash_save_state(device_data,
                                &device_data->state);
                device_data->restore_dev_state = true;
        }

        clk_disable(device_data->clk);
        ret = regulator_disable(device_data->regulator);
        if (ret)
                dev_err(dev, "[%s] regulator_disable() failed!", __func__);

        device_data->power_state = false;

out:
        spin_unlock(&device_data->power_state_lock);

        return ret;
}

/**
 * hash_enable_power - Request to enable power and clock.
 * @device_data:                Structure for the hash device.
 * @restore_device_state:       If true, restores a previously saved hw state.
 *
 * This function requests that power (regulator) and clock be enabled,
 * and can also restore a previously saved hw state.
 */
static int hash_enable_power(
                struct hash_device_data *device_data,
                bool restore_device_state)
{
        int ret = 0;
        struct device *dev = device_data->dev;

        spin_lock(&device_data->power_state_lock);
        if (!device_data->power_state) {
                ret = regulator_enable(device_data->regulator);
                if (ret) {
                        dev_err(dev, "[%s]: regulator_enable() failed!",
                                        __func__);
                        goto out;
                }
                ret = clk_enable(device_data->clk);
                if (ret) {
                        dev_err(dev, "[%s]: clk_enable() failed!",
                                        __func__);
                        /*
                         * Roll back the regulator, but keep the clk_enable()
                         * error in ret so the failure is not masked.
                         */
                        regulator_disable(device_data->regulator);
                        goto out;
                }
                device_data->power_state = true;
        }

        if (device_data->restore_dev_state) {
                if (restore_device_state) {
                        device_data->restore_dev_state = false;
                        hash_resume_state(device_data,
                                        &device_data->state);
                }
        }
out:
        spin_unlock(&device_data->power_state_lock);

        return ret;
}

/**
 * hash_get_device_data - Checks for an available hash device and returns it.
 * @hash_ctx:           Structure for the hash context.
 * @device_data:        Structure for the hash device.
 *
 * This function checks for an available hash device and returns it to
 * the caller.
 * Note! The caller needs to release the device, calling up().
 */
static int hash_get_device_data(struct hash_ctx *ctx,
                struct hash_device_data **device_data)
{
        int ret;
        struct klist_iter device_iterator;
        struct klist_node *device_node;
        struct hash_device_data *local_device_data = NULL;

        /* Wait until a device is available */
        ret = down_interruptible(&driver_data.device_allocation);
        if (ret)
                return ret;     /* Interrupted */

        /* Select a device */
        klist_iter_init(&driver_data.device_list, &device_iterator);
        device_node = klist_next(&device_iterator);
        while (device_node) {
                local_device_data = container_of(device_node,
                                struct hash_device_data, list_node);
                spin_lock(&local_device_data->ctx_lock);
                /* current_ctx allocates a device, NULL = unallocated */
                if (local_device_data->current_ctx) {
                        device_node = klist_next(&device_iterator);
                } else {
                        local_device_data->current_ctx = ctx;
                        ctx->device = local_device_data;
                        spin_unlock(&local_device_data->ctx_lock);
                        break;
                }
                spin_unlock(&local_device_data->ctx_lock);
        }
        klist_iter_exit(&device_iterator);

        if (!device_node) {
                /*
                 * No free device found.
                 * Since we allocated a device with down_interruptible, this
                 * should not be able to happen.
                 * Number of available devices, which are contained in
                 * device_allocation, is therefore decremented by not doing
                 * an up(device_allocation).
                 */
                return -EBUSY;
        }

        *device_data = local_device_data;

        return 0;
}

/**
 * hash_hw_write_key - Writes the key to the hardware registers.
 *
 * @device_data:        Structure for the hash device.
 * @key:                Key to be written.
 * @keylen:             The length of the key.
 *
 * Note! This function DOES NOT write to the NBLW registry, even though
 * specified in the hw design spec. Either due to incorrect info in the
 * spec or due to a bug in the hw.
 */
static void hash_hw_write_key(struct hash_device_data *device_data,
                const u8 *key, unsigned int keylen)
{
        u32 word = 0;
        int nwords = 1;

        HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

        while (keylen >= 4) {
                u32 *key_word = (u32 *)key;

                HASH_SET_DIN(key_word, nwords);
                keylen -= 4;
                key += 4;
        }

        /* Take care of the remaining bytes in the last word */
        if (keylen) {
                word = 0;
                while (keylen) {
                        word |= (key[keylen - 1] << (8 * (keylen - 1)));
                        keylen--;
                }

                HASH_SET_DIN(&word, nwords);
        }

        while (device_data->base->str & HASH_STR_DCAL_MASK)
                cpu_relax();

        HASH_SET_DCAL;

        while (device_data->base->str & HASH_STR_DCAL_MASK)
                cpu_relax();
}
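
/*
 * Worked example of the tail handling in hash_hw_write_key(): with
 * key = {0xAA, 0xBB, 0xCC} and keylen = 3 the inner loop accumulates
 *
 *   word = 0xAA | (0xBB << 8) | (0xCC << 16) = 0x00CCBBAA
 *
 * i.e. the remaining key bytes are packed little-endian into a single
 * word before being written to HASH_DIN.
 */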

/**
 * init_hash_hw - Initialise the hash hardware for a new calculation.
 * @device_data:        Structure for the hash device.
 * @ctx:                The hash context.
 *
 * This function will enable the bits needed to clear and start a new
 * calculation.
 */
static int init_hash_hw(struct hash_device_data *device_data,
                struct hash_ctx *ctx)
{
        int ret = 0;

        ret = hash_setconfiguration(device_data, &ctx->config);
        if (ret) {
                dev_err(device_data->dev, "[%s] hash_setconfiguration() "
                                "failed!", __func__);
                return ret;
        }

        hash_begin(device_data, ctx);

        if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
                hash_hw_write_key(device_data, ctx->key, ctx->keylen);

        return ret;
}

/**
 * hash_get_nents - Return number of entries (nents) in scatterlist (sg).
 *
 * @sg:                 Scatterlist.
 * @size:               Size in bytes.
 * @aligned:            True if sg data aligned to work in DMA mode.
 */
static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned)
{
        int nents = 0;
        bool aligned_data = true;

        while (size > 0 && sg) {
                nents++;
                size -= sg->length;

                /* hash_set_dma_transfer will align last nent */
                if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE))
                                || (!IS_ALIGNED(sg->length,
                                                HASH_DMA_ALIGN_SIZE) &&
                                        size > 0))
                        aligned_data = false;

                sg = sg_next(sg);
        }

        if (aligned)
                *aligned = aligned_data;

        if (size != 0)
                return -EFAULT;

        return nents;
}

/**
 * hash_dma_valid_data - checks for dma valid sg data.
 * @sg:                 Scatterlist.
 * @datasize:           Datasize in bytes.
 *
 * NOTE! This function checks for dma valid sg data, since dma
 * only accepts datasizes of even wordsize.
 */
static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
{
        bool aligned;

        /* Need to include at least one nent, else error */
        if (hash_get_nents(sg, datasize, &aligned) < 1)
                return false;

        return aligned;
}

/**
 * hash_init - Common hash init function for SHA1/SHA2 (SHA256).
 * @req: The hash request for the job.
 *
 * Initialize structures.
 */
static int hash_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct hash_req_ctx *req_ctx = ahash_request_ctx(req);

        if (!ctx->key)
                ctx->keylen = 0;

        memset(&req_ctx->state, 0, sizeof(struct hash_state));
        req_ctx->updated = 0;
        if (hash_mode == HASH_MODE_DMA) {
                if ((ctx->config.oper_mode == HASH_OPER_MODE_HMAC) &&
                                cpu_is_u5500()) {
                        pr_debug(DEV_DBG_NAME " [%s] HMAC and DMA not working "
                                        "on u5500, directing to CPU mode.",
                                        __func__);
                        req_ctx->dma_mode = false;      /* Don't use DMA */
                        goto out;
                }

                if (req->nbytes < HASH_DMA_ALIGN_SIZE) {
                        req_ctx->dma_mode = false;      /* Don't use DMA */

                        pr_debug(DEV_DBG_NAME " [%s] DMA mode, but direct "
                                        "to CPU mode for data size < %d",
                                        __func__, HASH_DMA_ALIGN_SIZE);
                } else {
                        if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE &&
                                        hash_dma_valid_data(req->src,
                                                req->nbytes)) {
                                req_ctx->dma_mode = true;
                        } else {
                                req_ctx->dma_mode = false;
                                pr_debug(DEV_DBG_NAME " [%s] DMA mode, but "
                                                "use CPU mode for datalength "
                                                "< %d or non-aligned data, "
                                                "except in last nent",
                                                __func__,
                                                HASH_DMA_PERFORMANCE_MIN_SIZE);
                        }
                }
        }
out:
        return 0;
}

/**
 * hash_processblock - This function processes a single block of 512 bits
 * (64 bytes), word aligned, starting at message.
 * @device_data:        Structure for the hash device.
 * @message:            Block (512 bits) of message to be written to
 *                      the HASH hardware.
 * @length:             Message length in bytes.
 */
static void hash_processblock(
                struct hash_device_data *device_data,
                const u32 *message, int length)
{
        int len = length / HASH_BYTES_PER_WORD;
        /*
         * NBLW bits. Reset the number of bits in last word (NBLW).
         */
        HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

        /*
         * Write message data to the HASH_DIN register.
         */
        HASH_SET_DIN(message, len);
}

/**
 * hash_messagepad - Pads a message and writes the nblw bits.
 * @device_data:        Structure for the hash device.
 * @message:            Last word of a message.
 * @index_bytes:        The number of bytes in the last message.
 *
 * This function manages the final part of the digest calculation, when less
 * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
 */
static void hash_messagepad(struct hash_device_data *device_data,
                const u32 *message, u8 index_bytes)
{
        int nwords = 1;

        /*
         * Clear hash str register, only clear NBLW
         * since DCAL will be reset by hardware.
         */
        HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

        /* Main loop */
        while (index_bytes >= 4) {
                HASH_SET_DIN(message, nwords);
                index_bytes -= 4;
                message++;
        }

        if (index_bytes)
                HASH_SET_DIN(message, nwords);

        while (device_data->base->str & HASH_STR_DCAL_MASK)
                cpu_relax();

        /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */
        HASH_SET_NBLW(index_bytes * 8);
        dev_dbg(device_data->dev, "[%s] DIN=0x%08x NBLW=%d", __func__,
                        readl_relaxed(&device_data->base->din),
                        (int)(readl_relaxed(&device_data->base->str) &
                                HASH_STR_NBLW_MASK));
        HASH_SET_DCAL;
        dev_dbg(device_data->dev, "[%s] after dcal -> DIN=0x%08x NBLW=%d",
                        __func__, readl_relaxed(&device_data->base->din),
                        (int)(readl_relaxed(&device_data->base->str) &
                                HASH_STR_NBLW_MASK));

        while (device_data->base->str & HASH_STR_DCAL_MASK)
                cpu_relax();
}
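
/*
 * Example: for index_bytes = 5, the main loop above writes one full word,
 * the partial-word write pushes the fifth byte, and NBLW is then set to
 * 1 * 8 = 8, telling the hardware that only 8 bits of the last DATAIN
 * word are valid.
 */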

/**
 * hash_incrementlength - Increments the length of the current message.
 * @ctx:                Hash context.
 * @incr:               Length of message processed already.
 *
 * Overflow cannot occur, because conditions for overflow are checked in
 * hash_hw_update.
 */
static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr)
{
        ctx->state.length.low_word += incr;

        /* Check for wrap-around */
        if (ctx->state.length.low_word < incr)
                ctx->state.length.high_word++;
}
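
/*
 * Carry example for hash_incrementlength(): with low_word = 0xFFFFFFFC
 * and incr = 8, low_word wraps to 0x00000004; since 0x00000004 < 8 the
 * wrap is detected and high_word is incremented, keeping the 64-bit
 * message length consistent across the two 32-bit words.
 */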

/**
 * hash_setconfiguration - Sets the required configuration for the hash
 * hardware.
 * @device_data:        Structure for the hash device.
 * @config:             Pointer to a configuration structure.
 */
int hash_setconfiguration(struct hash_device_data *device_data,
                struct hash_config *config)
{
        int ret = 0;

        if (config->algorithm != HASH_ALGO_SHA1 &&
            config->algorithm != HASH_ALGO_SHA256)
                return -EPERM;

        /*
         * DATAFORM bits. Set the DATAFORM bits to 0b11, which means the data
         * to be written to HASH_DIN is considered as 32 bits.
         */
        HASH_SET_DATA_FORMAT(config->data_format);

        /*
         * ALGO bit. Set to 0b1 for SHA-1 and 0b0 for SHA-256
         */
        switch (config->algorithm) {
        case HASH_ALGO_SHA1:
                HASH_SET_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
                break;

        case HASH_ALGO_SHA256:
                HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
                break;

        default:
                dev_err(device_data->dev, "[%s] Incorrect algorithm.",
                                __func__);
                return -EPERM;
        }

        /*
         * MODE bit. This bit selects between HASH or HMAC mode for the
         * selected algorithm. 0b0 = HASH and 0b1 = HMAC.
         */
        if (HASH_OPER_MODE_HASH == config->oper_mode)
                HASH_CLEAR_BITS(&device_data->base->cr,
                                HASH_CR_MODE_MASK);
        else if (HASH_OPER_MODE_HMAC == config->oper_mode) {
                HASH_SET_BITS(&device_data->base->cr,
                                HASH_CR_MODE_MASK);
                if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) {
                        /* Truncate key to blocksize */
                        dev_dbg(device_data->dev, "[%s] LKEY set", __func__);
                        HASH_SET_BITS(&device_data->base->cr,
                                        HASH_CR_LKEY_MASK);
                } else {
                        dev_dbg(device_data->dev, "[%s] LKEY cleared",
                                        __func__);
                        HASH_CLEAR_BITS(&device_data->base->cr,
                                        HASH_CR_LKEY_MASK);
                }
        } else {        /* Wrong hash mode */
                ret = -EPERM;
                dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
                                __func__);
        }
        return ret;
}

/**
 * hash_begin - This routine resets some globals and initializes the hash
 * hardware.
 * @device_data:        Structure for the hash device.
 * @ctx:                Hash context.
 */
void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
{
        /* HW and SW initializations */
        /* Note: there is no need to initialize buffer and digest members */

        while (device_data->base->str & HASH_STR_DCAL_MASK)
                cpu_relax();

        /*
         * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
         * prepare and initialize the HASH accelerator to compute the message
         * digest of a new message.
         */
        HASH_INITIALIZE;

        /*
         * NBLW bits. Reset the number of bits in last word (NBLW).
         */
        HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
}

int hash_process_data(
                struct hash_device_data *device_data,
                struct hash_ctx *ctx, struct hash_req_ctx *req_ctx,
                int msg_length, u8 *data_buffer, u8 *buffer, u8 *index)
{
        int ret = 0;
        u32 count;

        do {
                if ((*index + msg_length) < HASH_BLOCK_SIZE) {
                        for (count = 0; count < msg_length; count++) {
                                buffer[*index + count] =
                                        *(data_buffer + count);
                        }
                        *index += msg_length;
                        msg_length = 0;
                } else {
                        if (req_ctx->updated) {
                                ret = hash_resume_state(device_data,
                                                &device_data->state);
                                /*
                                 * The buffer is HASH_BLOCK_SIZE bytes; the
                                 * previous HASH_BLOCK_SIZE / sizeof(u32)
                                 * copied only a quarter of it.
                                 */
                                memmove(req_ctx->state.buffer,
                                                device_data->state.buffer,
                                                HASH_BLOCK_SIZE);
                                if (ret) {
                                        dev_err(device_data->dev, "[%s] "
                                                        "hash_resume_state()"
                                                        " failed!", __func__);
                                        goto out;
                                }
                        } else {
                                ret = init_hash_hw(device_data, ctx);
                                if (ret) {
                                        dev_err(device_data->dev, "[%s] "
                                                        "init_hash_hw()"
                                                        " failed!", __func__);
                                        goto out;
                                }
                                req_ctx->updated = 1;
                        }
                        /*
                         * If 'data_buffer' is four byte aligned and
                         * local buffer does not have any data, we can
                         * write data directly from 'data_buffer' to
                         * HW peripheral, otherwise we first copy data
                         * to a local buffer
                         */
                        if ((0 == (((u32)data_buffer) % 4))
                                        && (0 == *index))
                                hash_processblock(device_data,
                                                (const u32 *)data_buffer,
                                                HASH_BLOCK_SIZE);
                        else {
                                for (count = 0;
                                     count < (u32)(HASH_BLOCK_SIZE - *index);
                                     count++) {
                                        buffer[*index + count] =
                                                *(data_buffer + count);
                                }
                                hash_processblock(device_data,
                                                (const u32 *)buffer,
                                                HASH_BLOCK_SIZE);
                        }
                        hash_incrementlength(req_ctx, HASH_BLOCK_SIZE);
                        data_buffer += (HASH_BLOCK_SIZE - *index);

                        msg_length -= (HASH_BLOCK_SIZE - *index);
                        *index = 0;

                        ret = hash_save_state(device_data,
                                        &device_data->state);
                        /* Same here: copy the full block */
                        memmove(device_data->state.buffer,
                                        req_ctx->state.buffer,
                                        HASH_BLOCK_SIZE);
                        if (ret) {
                                dev_err(device_data->dev, "[%s] "
                                                "hash_save_state()"
                                                " failed!", __func__);
                                goto out;
                        }
                }
        } while (msg_length != 0);
out:

        return ret;
}

/**
 * hash_dma_final - The hash dma final function for SHA1/SHA256.
 * @req: The hash request for the job.
 */
static int hash_dma_final(struct ahash_request *req)
{
        int ret = 0;
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct hash_device_data *device_data;
        u8 digest[SHA256_DIGEST_SIZE];
        int bytes_written = 0;

        ret = hash_get_device_data(ctx, &device_data);
        if (ret)
                return ret;

        dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx);

        if (req_ctx->updated) {
                ret = hash_resume_state(device_data, &device_data->state);
                if (ret) {
                        dev_err(device_data->dev, "[%s] hash_resume_state() "
                                        "failed!", __func__);
                        goto out;
                }
        }

        if (!req_ctx->updated) {
                ret = hash_setconfiguration(device_data, &ctx->config);
                if (ret) {
                        dev_err(device_data->dev, "[%s] "
                                        "hash_setconfiguration() failed!",
                                        __func__);
                        goto out;
                }

                /* Enable DMA input */
                if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode) {
                        HASH_CLEAR_BITS(&device_data->base->cr,
                                        HASH_CR_DMAE_MASK);
                } else {
                        HASH_SET_BITS(&device_data->base->cr,
                                        HASH_CR_DMAE_MASK);
                        HASH_SET_BITS(&device_data->base->cr,
                                        HASH_CR_PRIVN_MASK);
                }

                HASH_INITIALIZE;

                if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
                        hash_hw_write_key(device_data, ctx->key, ctx->keylen);

                /* Number of bits in last word = (nbytes * 8) % 32 */
                HASH_SET_NBLW((req->nbytes * 8) % 32);
                req_ctx->updated = 1;
        }

        /* Store the nents in the dma struct. */
        ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL);
        if (!ctx->device->dma.nents) {
                dev_err(device_data->dev, "[%s] "
                                "ctx->device->dma.nents = 0", __func__);
                ret = -EFAULT;  /* Don't report success on this failure */
                goto out;
        }

        bytes_written = hash_dma_write(ctx, req->src, req->nbytes);
        if (bytes_written != req->nbytes) {
                dev_err(device_data->dev, "[%s] "
                                "hash_dma_write() failed!", __func__);
                ret = -EFAULT;  /* Don't report success on this failure */
                goto out;
        }

        wait_for_completion(&ctx->device->dma.complete);
        hash_dma_done(ctx);

        while (device_data->base->str & HASH_STR_DCAL_MASK)
                cpu_relax();

        if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
                unsigned int keylen = ctx->keylen;
                u8 *key = ctx->key;

                dev_dbg(device_data->dev, "[%s] keylen: %d", __func__,
                                ctx->keylen);
                hash_hw_write_key(device_data, key, keylen);
        }

        hash_get_digest(device_data, digest, ctx->config.algorithm);
        memcpy(req->result, digest, ctx->digestsize);

out:
        release_hash_device(device_data);

        /*
         * Allocated in setkey, and only used in HMAC.
         */
        kfree(ctx->key);
        ctx->key = NULL;        /* Avoid a dangling pointer on reuse */

        return ret;
}

/**
 * hash_hw_final - The final hash calculation function.
 * @req: The hash request for the job.
 */
int hash_hw_final(struct ahash_request *req)
{
        int ret = 0;
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct hash_device_data *device_data;
        u8 digest[SHA256_DIGEST_SIZE];

        ret = hash_get_device_data(ctx, &device_data);
        if (ret)
                return ret;

        dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx);

        if (req_ctx->updated) {
                ret = hash_resume_state(device_data, &device_data->state);

                if (ret) {
                        dev_err(device_data->dev, "[%s] hash_resume_state() "
                                        "failed!", __func__);
                        goto out;
                }
        } else if (req->nbytes == 0 && ctx->keylen == 0) {
                u8 zero_hash[SHA256_DIGEST_SIZE];
                u32 zero_hash_size = 0;
                bool zero_digest = false;
                /*
                 * Use a pre-calculated empty message digest
                 * (workaround since hw return zeroes, hw bug!?)
                 */
                ret = get_empty_message_digest(device_data, &zero_hash[0],
                                &zero_hash_size, &zero_digest);
                if (!ret && likely(zero_hash_size == ctx->digestsize) &&
                                zero_digest) {
                        memcpy(req->result, &zero_hash[0], ctx->digestsize);
                        goto out;
                } else if (!ret && !zero_digest) {
                        dev_dbg(device_data->dev, "[%s] HMAC zero msg with "
                                        "key, continue...", __func__);
                } else {
                        dev_err(device_data->dev, "[%s] ret=%d, or wrong "
                                        "digest size? %s", __func__, ret,
                                        (zero_hash_size == ctx->digestsize) ?
                                        "true" : "false");
                        /* Return error */
                        goto out;
                }
        } else if (req->nbytes == 0 && ctx->keylen > 0) {
                dev_err(device_data->dev, "[%s] Empty message with "
                                "keylength > 0, NOT supported.", __func__);
                ret = -EPERM;   /* Was silently returning success here */
                goto out;
        }

        if (!req_ctx->updated) {
                ret = init_hash_hw(device_data, ctx);
                if (ret) {
                        dev_err(device_data->dev, "[%s] init_hash_hw() "
                                        "failed!", __func__);
                        goto out;
                }
        }

        if (req_ctx->state.index) {
                hash_messagepad(device_data, req_ctx->state.buffer,
                                req_ctx->state.index);
        } else {
                HASH_SET_DCAL;
                while (device_data->base->str & HASH_STR_DCAL_MASK)
                        cpu_relax();
        }

        if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
                unsigned int keylen = ctx->keylen;
                u8 *key = ctx->key;

                dev_dbg(device_data->dev, "[%s] keylen: %d", __func__,
                                ctx->keylen);
                hash_hw_write_key(device_data, key, keylen);
        }

        hash_get_digest(device_data, digest, ctx->config.algorithm);
        memcpy(req->result, digest, ctx->digestsize);

out:
        release_hash_device(device_data);

        /*
         * Allocated in setkey, and only used in HMAC.
         */
        kfree(ctx->key);
        ctx->key = NULL;        /* Avoid a dangling pointer on reuse */

        return ret;
}

/**
 * hash_hw_update - Updates current HASH computation hashing another part of
 * the message.
 * @req:        Byte array containing the message to be hashed (caller
 *              allocated).
 */
int hash_hw_update(struct ahash_request *req)
{
        int ret = 0;
        u8 index = 0;
        u8 *buffer;
        struct hash_device_data *device_data;
        u8 *data_buffer;
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct crypto_hash_walk walk;
        int msg_length = crypto_hash_walk_first(req, &walk);

        /* Empty message ("") is correct indata */
        if (msg_length == 0)
                return ret;

        index = req_ctx->state.index;
        buffer = (u8 *)req_ctx->state.buffer;

        /* Check if ctx->state.length + msg_length overflows */
        if (msg_length > (req_ctx->state.length.low_word + msg_length) &&
                        HASH_HIGH_WORD_MAX_VAL ==
                        req_ctx->state.length.high_word) {
                pr_err(DEV_DBG_NAME " [%s] HASH_MSG_LENGTH_OVERFLOW!",
                                __func__);
                return -EPERM;
        }

        ret = hash_get_device_data(ctx, &device_data);
        if (ret)
                return ret;

        /* Main loop */
        while (0 != msg_length) {
                data_buffer = walk.data;
                ret = hash_process_data(device_data, ctx, req_ctx, msg_length,
                                data_buffer, buffer, &index);

                if (ret) {
                        dev_err(device_data->dev, "[%s] hash_process_data() "
                                        "failed!", __func__);
                        goto out;
                }

                msg_length = crypto_hash_walk_done(&walk, 0);
        }

        req_ctx->state.index = index;
        dev_dbg(device_data->dev, "[%s] indata length=%d, bin=%d",
                        __func__, req_ctx->state.index,
                        req_ctx->state.bit_index);

out:
        release_hash_device(device_data);

        return ret;
}

/**
 * hash_resume_state - Function that resumes the state of a calculation.
 * @device_data:        Pointer to the device structure.
 * @device_state:       The state to be restored in the hash hardware.
 */
int hash_resume_state(struct hash_device_data *device_data,
                const struct hash_state *device_state)
{
        u32 temp_cr;
        s32 count;
        int hash_mode = HASH_OPER_MODE_HASH;

        if (NULL == device_state) {
                dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
                                __func__);
                return -EPERM;
        }

        /* Check correctness of index and length members */
        if (device_state->index > HASH_BLOCK_SIZE
            || (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) {
                dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
                                __func__);
                return -EPERM;
        }

        /*
         * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
         * prepare and initialize the HASH accelerator to compute the message
         * digest of a new message.
         */
        HASH_INITIALIZE;

        temp_cr = device_state->temp_cr;
        writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr);

        if (device_data->base->cr & HASH_CR_MODE_MASK)
                hash_mode = HASH_OPER_MODE_HMAC;
        else
                hash_mode = HASH_OPER_MODE_HASH;

        for (count = 0; count < HASH_CSR_COUNT; count++) {
                if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
                        break;

                writel_relaxed(device_state->csr[count],
                                &device_data->base->csrx[count]);
        }

        writel_relaxed(device_state->csfull, &device_data->base->csfull);
        writel_relaxed(device_state->csdatain, &device_data->base->csdatain);

        writel_relaxed(device_state->str_reg, &device_data->base->str);
        writel_relaxed(temp_cr, &device_data->base->cr);

        return 0;
}

/**
 * hash_save_state - Function that saves the state of hardware.
 * @device_data:        Pointer to the device structure.
 * @device_state:       The structure where the hardware state should be
 *                      saved.
 */
int hash_save_state(struct hash_device_data *device_data,
                struct hash_state *device_state)
{
        u32 temp_cr;
        u32 count;
        int hash_mode = HASH_OPER_MODE_HASH;

        if (NULL == device_state) {
                dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
                                __func__);
                return -ENOTSUPP;
        }

        /* Write dummy value to force digest intermediate calculation. This
         * actually makes sure that there isn't any ongoing calculation in the
         * hardware.
         */
        while (device_data->base->str & HASH_STR_DCAL_MASK)
                cpu_relax();

        temp_cr = readl_relaxed(&device_data->base->cr);

        device_state->str_reg = readl_relaxed(&device_data->base->str);

        device_state->din_reg = readl_relaxed(&device_data->base->din);

        if (device_data->base->cr & HASH_CR_MODE_MASK)
                hash_mode = HASH_OPER_MODE_HMAC;
        else
                hash_mode = HASH_OPER_MODE_HASH;

        for (count = 0; count < HASH_CSR_COUNT; count++) {
                if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
                        break;

                device_state->csr[count] =
                        readl_relaxed(&device_data->base->csrx[count]);
        }

        device_state->csfull = readl_relaxed(&device_data->base->csfull);
        device_state->csdatain = readl_relaxed(&device_data->base->csdatain);

        device_state->temp_cr = temp_cr;

        return 0;
}

/**
 * hash_check_hw - This routine checks for peripheral Ids and PCell Ids.
 * @device_data:        Structure for the hash device.
 */
int hash_check_hw(struct hash_device_data *device_data)
{
        /* Checking Peripheral Ids */
        if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0)
            && HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1)
            && HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2)
            && HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3)
            && HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0)
            && HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1)
            && HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2)
            && HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)) {
                return 0;
        }

        dev_err(device_data->dev, "[%s] HASH_UNSUPPORTED_HW!", __func__);
        return -ENOTSUPP;
}

/**
 * hash_get_digest - Gets the digest.
 * @device_data:        Pointer to the device structure.
 * @digest:             User allocated byte array for the calculated digest.
 * @algorithm:          The algorithm in use.
 */
void hash_get_digest(struct hash_device_data *device_data,
                u8 *digest, int algorithm)
{
        u32 temp_hx_val, count;
        int loop_ctr;

        if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) {
                dev_err(device_data->dev, "[%s] Incorrect algorithm %d",
                                __func__, algorithm);
                return;
        }

        if (algorithm == HASH_ALGO_SHA1)
                loop_ctr = SHA1_DIGEST_SIZE / sizeof(u32);
        else
                loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32);

        dev_dbg(device_data->dev, "[%s] digest array:(0x%x)",
                        __func__, (u32) digest);

        /* Copy result into digest array */
        for (count = 0; count < loop_ctr; count++) {
                temp_hx_val = readl_relaxed(&device_data->base->hx[count]);
                digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF);
                digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF);
                digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF);
                digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF);
        }
}
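
/*
 * The four per-byte stores above amount to one big-endian word store.
 * A minimal equivalent sketch (would require <asm/unaligned.h>):
 *
 *   put_unaligned_be32(temp_hx_val, &digest[count * 4]);
 */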

/**
 * ahash_update - The hash update function for SHA1/SHA2 (SHA256).
 * @req: The hash request for the job.
 */
static int ahash_update(struct ahash_request *req)
{
        int ret = 0;
        struct hash_req_ctx *req_ctx = ahash_request_ctx(req);

        if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode)
                ret = hash_hw_update(req);
        /* Skip update for DMA, all data will be passed to DMA in final */

        if (ret) {
                pr_err(DEV_DBG_NAME " [%s] hash_hw_update() failed!",
                                __func__);
        }

        return ret;
}

/**
 * ahash_final - The hash final function for SHA1/SHA2 (SHA256).
 * @req: The hash request for the job.
 */
static int ahash_final(struct ahash_request *req)
{
        int ret = 0;
        struct hash_req_ctx *req_ctx = ahash_request_ctx(req);

        pr_debug(DEV_DBG_NAME " [%s] data size: %d", __func__, req->nbytes);

        if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode)
                ret = hash_dma_final(req);
        else
                ret = hash_hw_final(req);

        if (ret) {
                pr_err(DEV_DBG_NAME " [%s] hash_hw/dma_final() failed",
                                __func__);
        }

        return ret;
}

static int hash_setkey(struct crypto_ahash *tfm,
                const u8 *key, unsigned int keylen, int alg)
{
        int ret = 0;
        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

        /*
         * Freed in final.
         */
        ctx->key = kmalloc(keylen, GFP_KERNEL);
        if (!ctx->key) {
                pr_err(DEV_DBG_NAME " [%s] Failed to allocate ctx->key "
                                "for %d\n", __func__, alg);
                return -ENOMEM;
        }

        memcpy(ctx->key, key, keylen);
        ctx->keylen = keylen;

        return ret;
}

static int ahash_sha1_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ctx->config.data_format = HASH_DATA_8_BITS;
        ctx->config.algorithm = HASH_ALGO_SHA1;
        ctx->config.oper_mode = HASH_OPER_MODE_HASH;
        ctx->digestsize = SHA1_DIGEST_SIZE;

        return hash_init(req);
}

static int ahash_sha256_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ctx->config.data_format = HASH_DATA_8_BITS;
        ctx->config.algorithm = HASH_ALGO_SHA256;
        ctx->config.oper_mode = HASH_OPER_MODE_HASH;
        ctx->digestsize = SHA256_DIGEST_SIZE;

        return hash_init(req);
}

static int ahash_sha1_digest(struct ahash_request *req)
{
        int ret2, ret1;

        ret1 = ahash_sha1_init(req);
        if (ret1)
                goto out;

        ret1 = ahash_update(req);
        ret2 = ahash_final(req);

out:
        return ret1 ? ret1 : ret2;
}

static int ahash_sha256_digest(struct ahash_request *req)
{
        int ret2, ret1;

        ret1 = ahash_sha256_init(req);
        if (ret1)
                goto out;

        ret1 = ahash_update(req);
        ret2 = ahash_final(req);

out:
        return ret1 ? ret1 : ret2;
}

static int hmac_sha1_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ctx->config.data_format = HASH_DATA_8_BITS;
        ctx->config.algorithm = HASH_ALGO_SHA1;
        ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
        ctx->digestsize = SHA1_DIGEST_SIZE;

        return hash_init(req);
}

static int hmac_sha256_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ctx->config.data_format = HASH_DATA_8_BITS;
        ctx->config.algorithm = HASH_ALGO_SHA256;
        ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
        ctx->digestsize = SHA256_DIGEST_SIZE;

        return hash_init(req);
}

static int hmac_sha1_digest(struct ahash_request *req)
{
        int ret2, ret1;

        ret1 = hmac_sha1_init(req);
        if (ret1)
                goto out;

        ret1 = ahash_update(req);
        ret2 = ahash_final(req);

out:
        return ret1 ? ret1 : ret2;
}

static int hmac_sha256_digest(struct ahash_request *req)
{
        int ret2, ret1;

        ret1 = hmac_sha256_init(req);
        if (ret1)
                goto out;

        ret1 = ahash_update(req);
        ret2 = ahash_final(req);

out:
        return ret1 ? ret1 : ret2;
}

static int hmac_sha1_setkey(struct crypto_ahash *tfm,
                const u8 *key, unsigned int keylen)
{
        return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1);
}

static int hmac_sha256_setkey(struct crypto_ahash *tfm,
                const u8 *key, unsigned int keylen)
{
        return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256);
}
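
/*
 * Once registered, the transforms below are reachable from userspace via
 * AF_ALG. A minimal sketch, error handling omitted (the key/msg/digest
 * buffer names are placeholders):
 *
 *   struct sockaddr_alg sa = {
 *           .salg_family = AF_ALG,
 *           .salg_type   = "hash",
 *           .salg_name   = "hmac(sha1)",
 *   };
 *   int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *   bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *   setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, keylen);
 *   int opfd = accept(tfmfd, NULL, 0);
 *   write(opfd, msg, msglen);
 *   read(opfd, digest, SHA1_DIGEST_SIZE);
 */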

struct hash_algo_template {
        struct hash_config conf;
        struct ahash_alg hash;
};

static int hash_cra_init(struct crypto_tfm *tfm)
{
        struct hash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_alg *alg = tfm->__crt_alg;
        struct hash_algo_template *hash_alg;

        hash_alg = container_of(__crypto_ahash_alg(alg),
                        struct hash_algo_template,
                        hash);

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                        sizeof(struct hash_req_ctx));

        ctx->config.data_format = HASH_DATA_8_BITS;
        ctx->config.algorithm = hash_alg->conf.algorithm;
        ctx->config.oper_mode = hash_alg->conf.oper_mode;

        ctx->digestsize = hash_alg->hash.halg.digestsize;

        return 0;
}
  1304. static struct hash_algo_template hash_algs[] = {
  1305. {
  1306. .conf.algorithm = HASH_ALGO_SHA1,
  1307. .conf.oper_mode = HASH_OPER_MODE_HASH,
  1308. .hash = {
  1309. .init = hash_init,
  1310. .update = ahash_update,
  1311. .final = ahash_final,
  1312. .digest = ahash_sha1_digest,
  1313. .halg.digestsize = SHA1_DIGEST_SIZE,
  1314. .halg.statesize = sizeof(struct hash_ctx),
  1315. .halg.base = {
  1316. .cra_name = "sha1",
  1317. .cra_driver_name = "sha1-ux500",
  1318. .cra_flags = CRYPTO_ALG_TYPE_AHASH |
  1319. CRYPTO_ALG_ASYNC,
  1320. .cra_blocksize = SHA1_BLOCK_SIZE,
  1321. .cra_ctxsize = sizeof(struct hash_ctx),
  1322. .cra_init = hash_cra_init,
  1323. .cra_module = THIS_MODULE,
  1324. }
  1325. }
  1326. },
	{
		.conf.algorithm = HASH_ALGO_SHA256,
		.conf.oper_mode = HASH_OPER_MODE_HASH,
		.hash = {
			.init = hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = ahash_sha256_digest,
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-ux500",
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_type = &crypto_ahash_type,
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.conf.algorithm = HASH_ALGO_SHA1,
		.conf.oper_mode = HASH_OPER_MODE_HMAC,
		.hash = {
			.init = hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = hmac_sha1_digest,
			.setkey = hmac_sha1_setkey,
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-ux500",
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_type = &crypto_ahash_type,
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.conf.algorithm = HASH_ALGO_SHA256,
		.conf.oper_mode = HASH_OPER_MODE_HMAC,
		.hash = {
			.init = hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = hmac_sha256_digest,
			.setkey = hmac_sha256_setkey,
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-ux500",
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_type = &crypto_ahash_type,
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	}
};

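/*
 * Editor's note: once registered, the algorithms above are reached through
 * the generic kernel crypto API rather than called directly. A minimal
 * sketch of an in-kernel consumer follows (async completion and error
 * handling trimmed); the example_ name and one-shot digest flow are
 * illustrative assumptions, not part of this driver:
 *
 *	static int example_sha1(const u8 *data, unsigned int len, u8 *out)
 *	{
 *		struct crypto_ahash *tfm;
 *		struct ahash_request *req;
 *		struct scatterlist sg;
 *		int ret;
 *
 *		// "sha1" resolves to sha1-ux500 when it has best priority.
 *		tfm = crypto_alloc_ahash("sha1", 0, 0);
 *		if (IS_ERR(tfm))
 *			return PTR_ERR(tfm);
 *
 *		req = ahash_request_alloc(tfm, GFP_KERNEL);
 *		sg_init_one(&sg, data, len);
 *		ahash_request_set_callback(req, 0, NULL, NULL);
 *		ahash_request_set_crypt(req, &sg, out, len);
 *
 *		ret = crypto_ahash_digest(req); // one-shot init/update/final
 *
 *		ahash_request_free(req);
 *		crypto_free_ahash(tfm);
 *		return ret;
 *	}
 */
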
/**
 * ahash_algs_register_all - Register all supported hash algorithms.
 * @device_data: Structure for the hash device.
 */
static int ahash_algs_register_all(struct hash_device_data *device_data)
{
	int ret;
	int i;
	int count;

	for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
		ret = crypto_register_ahash(&hash_algs[i].hash);
		if (ret) {
			count = i;
			dev_err(device_data->dev, "[%s] alg registration failed",
				hash_algs[i].hash.halg.base.cra_driver_name);
			goto unreg;
		}
	}
	return 0;
unreg:
	for (i = 0; i < count; i++)
		crypto_unregister_ahash(&hash_algs[i].hash);
	return ret;
}

/**
 * ahash_algs_unregister_all - Unregister all previously registered hash
 * algorithms.
 * @device_data: Structure for the hash device.
 */
static void ahash_algs_unregister_all(struct hash_device_data *device_data)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
		crypto_unregister_ahash(&hash_algs[i].hash);
}

/**
 * ux500_hash_probe - Function that probes the hash hardware.
 * @pdev: The platform device.
 */
static int ux500_hash_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct resource *res = NULL;
	struct hash_device_data *device_data;
	struct device *dev = &pdev->dev;

	/* Probe runs in process context, so GFP_KERNEL is sufficient here. */
	device_data = kzalloc(sizeof(struct hash_device_data), GFP_KERNEL);
	if (!device_data) {
		dev_dbg(dev, "[%s] kzalloc() failed!", __func__);
		ret = -ENOMEM;
		goto out;
	}
	device_data->dev = dev;
	device_data->current_ctx = NULL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_dbg(dev, "[%s] platform_get_resource() failed!", __func__);
		ret = -ENODEV;
		goto out_kfree;
	}

	res = request_mem_region(res->start, resource_size(res), pdev->name);
	if (res == NULL) {
		dev_dbg(dev, "[%s] request_mem_region() failed!", __func__);
		ret = -EBUSY;
		goto out_kfree;
	}

	device_data->base = ioremap(res->start, resource_size(res));
	if (!device_data->base) {
		dev_err(dev, "[%s] ioremap() failed!", __func__);
		ret = -ENOMEM;
		goto out_free_mem;
	}
	spin_lock_init(&device_data->ctx_lock);
	spin_lock_init(&device_data->power_state_lock);

	/* Enable power for HASH1 hardware block */
	device_data->regulator = regulator_get(dev, "v-ape");
	if (IS_ERR(device_data->regulator)) {
		dev_err(dev, "[%s] regulator_get() failed!", __func__);
		ret = PTR_ERR(device_data->regulator);
		device_data->regulator = NULL;
		goto out_unmap;
	}

	/* Enable the clock for HASH1 hardware block */
	device_data->clk = clk_get(dev, NULL);
	if (IS_ERR(device_data->clk)) {
		dev_err(dev, "[%s] clk_get() failed!", __func__);
		ret = PTR_ERR(device_data->clk);
		goto out_regulator;
	}

	/* Enable device power (and clock) */
	ret = hash_enable_power(device_data, false);
	if (ret) {
		dev_err(dev, "[%s]: hash_enable_power() failed!", __func__);
		goto out_clk;
	}

	ret = hash_check_hw(device_data);
	if (ret) {
		dev_err(dev, "[%s] hash_check_hw() failed!", __func__);
		goto out_power;
	}

	if (hash_mode == HASH_MODE_DMA)
		hash_dma_setup_channel(device_data, dev);

	platform_set_drvdata(pdev, device_data);

	/* Put the new device into the device list... */
	klist_add_tail(&device_data->list_node, &driver_data.device_list);
	/* ... and signal that a new device is available. */
	up(&driver_data.device_allocation);

	ret = ahash_algs_register_all(device_data);
	if (ret) {
		dev_err(dev, "[%s] ahash_algs_register_all() failed!",
			__func__);
		goto out_power;
	}

	dev_info(dev, "[%s] successfully probed\n", __func__);
	return 0;

out_power:
	hash_disable_power(device_data, false);
out_clk:
	clk_put(device_data->clk);
out_regulator:
	regulator_put(device_data->regulator);
out_unmap:
	iounmap(device_data->base);
out_free_mem:
	release_mem_region(res->start, resource_size(res));
out_kfree:
	kfree(device_data);
out:
	return ret;
}

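/*
 * Editor's note: ux500_hash_probe() publishes each device by pairing
 * klist_add_tail() with up() on driver_data.device_allocation, so the
 * semaphore counts available devices while the klist stores them. The
 * matching allocation path lives elsewhere in this driver; the sketch
 * below is an assumed illustration of that pattern (the helper name is
 * hypothetical, and a real implementation would also mark the device
 * busy, e.g. by setting current_ctx, before dropping ctx_lock):
 *
 *	static struct hash_device_data *example_get_device(void)
 *	{
 *		struct hash_device_data *dd = NULL;
 *		struct klist_iter iter;
 *		struct klist_node *node;
 *
 *		// Blocks until probe has announced at least one device.
 *		if (down_interruptible(&driver_data.device_allocation))
 *			return NULL;
 *
 *		klist_iter_init(&driver_data.device_list, &iter);
 *		while ((node = klist_next(&iter)) != NULL) {
 *			dd = container_of(node, struct hash_device_data,
 *					  list_node);
 *			spin_lock(&dd->ctx_lock);
 *			if (!dd->current_ctx) { // found a free device
 *				spin_unlock(&dd->ctx_lock);
 *				break;
 *			}
 *			spin_unlock(&dd->ctx_lock);
 *			dd = NULL;
 *		}
 *		klist_iter_exit(&iter);
 *		return dd;
 *	}
 */
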
/**
 * ux500_hash_remove - Function that removes the hash device from the platform.
 * @pdev: The platform device.
 */
static int ux500_hash_remove(struct platform_device *pdev)
{
	struct resource *res;
	struct hash_device_data *device_data;
	struct device *dev = &pdev->dev;

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(dev, "[%s]: platform_get_drvdata() failed!",
			__func__);
		return -ENOMEM;
	}

	/* Try to decrease the number of available devices. */
	if (down_trylock(&driver_data.device_allocation))
		return -EBUSY;

	/* Check that the device is free */
	spin_lock(&device_data->ctx_lock);
	/* current_ctx allocates a device, NULL = unallocated */
	if (device_data->current_ctx) {
		/* The device is busy */
		spin_unlock(&device_data->ctx_lock);
		/* Return the device to the pool. */
		up(&driver_data.device_allocation);
		return -EBUSY;
	}
	spin_unlock(&device_data->ctx_lock);

	/* Remove the device from the list */
	if (klist_node_attached(&device_data->list_node))
		klist_remove(&device_data->list_node);

	/* If this was the last device, remove the services */
	if (list_empty(&driver_data.device_list.k_list))
		ahash_algs_unregister_all(device_data);

	if (hash_disable_power(device_data, false))
		dev_err(dev, "[%s]: hash_disable_power() failed",
			__func__);

	clk_put(device_data->clk);
	regulator_put(device_data->regulator);
	iounmap(device_data->base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		release_mem_region(res->start, resource_size(res));

	kfree(device_data);

	return 0;
}

/**
 * ux500_hash_shutdown - Function that shuts down the hash device.
 * @pdev: The platform device.
 */
static void ux500_hash_shutdown(struct platform_device *pdev)
{
	struct resource *res = NULL;
	struct hash_device_data *device_data;

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
			__func__);
		return;
	}

	/* Check that the device is free */
	spin_lock(&device_data->ctx_lock);
	/* current_ctx allocates a device, NULL = unallocated */
	if (!device_data->current_ctx) {
		if (down_trylock(&driver_data.device_allocation))
			dev_dbg(&pdev->dev, "[%s]: Hash still in use! Shutting down anyway...",
				__func__);
		/*
		 * (Allocate the device)
		 * Set current_ctx to a non-NULL (dummy) value so that no
		 * new context can claim this device while it shuts down.
		 */
		device_data->current_ctx++;
	}
	spin_unlock(&device_data->ctx_lock);

	/* Remove the device from the list */
	if (klist_node_attached(&device_data->list_node))
		klist_remove(&device_data->list_node);

	/* If this was the last device, remove the services */
	if (list_empty(&driver_data.device_list.k_list))
		ahash_algs_unregister_all(device_data);

	iounmap(device_data->base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		release_mem_region(res->start, resource_size(res));

	if (hash_disable_power(device_data, false))
		dev_err(&pdev->dev, "[%s] hash_disable_power() failed",
			__func__);
}

/**
 * ux500_hash_suspend - Function that suspends the hash device.
 * @pdev: The platform device.
 * @state: The power state to enter (unused here).
 */
static int ux500_hash_suspend(struct platform_device *pdev, pm_message_t state)
{
	int ret;
	struct hash_device_data *device_data;
	struct hash_ctx *temp_ctx = NULL;

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
			__func__);
		return -ENOMEM;
	}

	/* Claim the device with a dummy context if it is currently free. */
	spin_lock(&device_data->ctx_lock);
	if (!device_data->current_ctx)
		device_data->current_ctx++;
	spin_unlock(&device_data->ctx_lock);

	/*
	 * ++temp_ctx turns the NULL pointer into the same dummy value as
	 * above, so this tests whether the device was free before suspend.
	 */
	if (device_data->current_ctx == ++temp_ctx) {
		if (down_interruptible(&driver_data.device_allocation))
			dev_dbg(&pdev->dev, "[%s]: down_interruptible() failed",
				__func__);
		ret = hash_disable_power(device_data, false);
	} else {
		ret = hash_disable_power(device_data, true);
	}

	if (ret)
		dev_err(&pdev->dev, "[%s]: hash_disable_power()", __func__);

	return ret;
}

/**
 * ux500_hash_resume - Function that resumes the hash device.
 * @pdev: The platform device.
 */
static int ux500_hash_resume(struct platform_device *pdev)
{
	int ret = 0;
	struct hash_device_data *device_data;
	struct hash_ctx *temp_ctx = NULL;

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
			__func__);
		return -ENOMEM;
	}

	/* Release the dummy context set up in ux500_hash_suspend(). */
	spin_lock(&device_data->ctx_lock);
	if (device_data->current_ctx == ++temp_ctx)
		device_data->current_ctx = NULL;
	spin_unlock(&device_data->ctx_lock);

	if (!device_data->current_ctx)
		up(&driver_data.device_allocation);
	else
		ret = hash_enable_power(device_data, true);

	if (ret)
		dev_err(&pdev->dev, "[%s]: hash_enable_power() failed!",
			__func__);

	return ret;
}

static struct platform_driver hash_driver = {
	.probe = ux500_hash_probe,
	.remove = ux500_hash_remove,
	.shutdown = ux500_hash_shutdown,
	.suspend = ux500_hash_suspend,
	.resume = ux500_hash_resume,
	.driver = {
		.owner = THIS_MODULE,
		.name = "hash1",
	}
};

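/*
 * Editor's note: the driver binds by name to a platform device called
 * "hash1". A board file (or equivalent platform code) would therefore
 * declare the device roughly as sketched below; the base address, size,
 * and example_ identifiers are illustrative assumptions, not values
 * taken from this driver:
 *
 *	static struct resource example_hash1_resources[] = {
 *		{
 *			.start = 0xa03c2000,	// assumed base address
 *			.end = 0xa03c2000 + SZ_4K - 1,
 *			.flags = IORESOURCE_MEM,
 *		},
 *	};
 *
 *	static struct platform_device example_hash1_device = {
 *		.name = "hash1",	// must match hash_driver.driver.name
 *		.id = -1,
 *		.num_resources = ARRAY_SIZE(example_hash1_resources),
 *		.resource = example_hash1_resources,
 *	};
 *
 *	// Registering it triggers ux500_hash_probe() once the driver loads:
 *	platform_device_register(&example_hash1_device);
 */
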
/**
 * ux500_hash_mod_init - The kernel module init function.
 */
static int __init ux500_hash_mod_init(void)
{
	klist_init(&driver_data.device_list, NULL, NULL);
	/* Initialize the semaphore to 0 devices (locked state) */
	sema_init(&driver_data.device_allocation, 0);
	return platform_driver_register(&hash_driver);
}

/**
 * ux500_hash_mod_fini - The kernel module exit function.
 */
static void __exit ux500_hash_mod_fini(void)
{
	platform_driver_unregister(&hash_driver);
}

module_init(ux500_hash_mod_init);
module_exit(ux500_hash_mod_fini);

MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
MODULE_LICENSE("GPL");

MODULE_ALIAS("sha1-all");
MODULE_ALIAS("sha256-all");
MODULE_ALIAS("hmac-sha1-all");
MODULE_ALIAS("hmac-sha256-all");