hash_core.c

/*
 * Cryptographic API.
 * Support for Nomadik hardware crypto engine.
 *
 * Copyright (C) ST-Ericsson SA 2010
 * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson
 * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson
 * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
 * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
 * Author: Andreas Westin <andreas.westin@stericsson.com> for ST-Ericsson.
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/klist.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/crypto.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/bitops.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <mach/crypto-ux500.h>
#include <mach/hardware.h>
#include "hash_alg.h"

#define DEV_DBG_NAME "hashX hashX:"

static int hash_mode;
module_param(hash_mode, int, 0);
MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1");

/* Pre-calculated empty message digests. */
static u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = {
	0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
	0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
	0xaf, 0xd8, 0x07, 0x09
};

static u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = {
	0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
	0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
	0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
	0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
};

/* HMAC-SHA1, no key */
static u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = {
	0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08,
	0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63,
	0x70, 0x69, 0x0e, 0x1d
};

/* HMAC-SHA256, no key */
static u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = {
	0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec,
	0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5,
	0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53,
	0xc6, 0xc7, 0x12, 0x14, 0x42, 0x92, 0xc5, 0xad
};
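
/*
 * The four arrays above are the well-known digests of the empty message:
 * zero_message_hash_sha1 is SHA-1("") and zero_message_hash_sha256 is
 * SHA-256(""); the hmac variants are HMAC-SHA1 resp. HMAC-SHA256 of the
 * empty message with a zero-length key. They can be reproduced from
 * userspace, e.g. with `printf '' | sha1sum`.
 */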
/**
 * struct hash_driver_data - data specific to the driver.
 *
 * @device_list:	A list of registered devices to choose from.
 * @device_allocation:	A semaphore initialized with number of devices.
 */
struct hash_driver_data {
	struct klist		device_list;
	struct semaphore	device_allocation;
};

static struct hash_driver_data	driver_data;

/* Declaration of functions */

/**
 * hash_messagepad - Pads a message and writes the NBLW bits.
 * @device_data:	Structure for the hash device.
 * @message:		Last word of a message.
 * @index_bytes:	The number of bytes in the last message.
 *
 * This function manages the final part of the digest calculation, when less
 * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
 */
static void hash_messagepad(struct hash_device_data *device_data,
		const u32 *message, u8 index_bytes);

/**
 * release_hash_device - Releases a previously allocated hash device.
 * @device_data:	Structure for the hash device.
 */
static void release_hash_device(struct hash_device_data *device_data)
{
	spin_lock(&device_data->ctx_lock);
	device_data->current_ctx->device = NULL;
	device_data->current_ctx = NULL;
	spin_unlock(&device_data->ctx_lock);

	/*
	 * The down_interruptible part for this semaphore is called in
	 * hash_get_device_data.
	 */
	up(&driver_data.device_allocation);
}

static void hash_dma_setup_channel(struct hash_device_data *device_data,
		struct device *dev)
{
	struct hash_platform_data *platform_data = dev->platform_data;

	dma_cap_zero(device_data->dma.mask);
	dma_cap_set(DMA_SLAVE, device_data->dma.mask);

	device_data->dma.cfg_mem2hash = platform_data->mem_to_engine;
	device_data->dma.chan_mem2hash =
		dma_request_channel(device_data->dma.mask,
				platform_data->dma_filter,
				device_data->dma.cfg_mem2hash);

	init_completion(&device_data->dma.complete);
}

static void hash_dma_callback(void *data)
{
	struct hash_ctx *ctx = (struct hash_ctx *) data;

	complete(&ctx->device->dma.complete);
}

static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
		int len, enum dma_data_direction direction)
{
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *channel = NULL;
	dma_cookie_t cookie;

	if (direction != DMA_TO_DEVICE) {
		dev_err(ctx->device->dev, "[%s] Invalid DMA direction",
				__func__);
		return -EFAULT;
	}

	sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE);

	channel = ctx->device->dma.chan_mem2hash;
	ctx->device->dma.sg = sg;
	ctx->device->dma.sg_len = dma_map_sg(channel->device->dev,
			ctx->device->dma.sg, ctx->device->dma.nents,
			direction);

	if (!ctx->device->dma.sg_len) {
		dev_err(ctx->device->dev,
				"[%s]: Could not map the sg list (TO_DEVICE)",
				__func__);
		return -EFAULT;
	}

	dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
			"(TO_DEVICE)", __func__);
	desc = channel->device->device_prep_slave_sg(channel,
			ctx->device->dma.sg, ctx->device->dma.sg_len,
			direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT, NULL);
	if (!desc) {
		dev_err(ctx->device->dev,
			"[%s]: device_prep_slave_sg() failed!", __func__);
		return -EFAULT;
	}

	desc->callback = hash_dma_callback;
	desc->callback_param = ctx;
	cookie = desc->tx_submit(desc);
	dma_async_issue_pending(channel);

	return 0;
}
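
/*
 * The transfer above follows the usual dmaengine life cycle: map the
 * scatterlist, get a slave-sg descriptor from the channel, attach the
 * completion callback, submit with tx_submit() and kick the engine with
 * dma_async_issue_pending(). hash_dma_final() then sleeps on
 * ctx->device->dma.complete until hash_dma_callback() fires.
 */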
static void hash_dma_done(struct hash_ctx *ctx)
{
	struct dma_chan *chan;

	chan = ctx->device->dma.chan_mem2hash;
	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
			ctx->device->dma.sg_len, DMA_TO_DEVICE);
}

static int hash_dma_write(struct hash_ctx *ctx,
		struct scatterlist *sg, int len)
{
	int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
	if (error) {
		dev_dbg(ctx->device->dev, "[%s]: hash_set_dma_transfer() "
			"failed", __func__);
		return error;
	}

	return len;
}

/**
 * get_empty_message_digest - Returns a pre-calculated digest for
 * the empty message.
 * @device_data:	Structure for the hash device.
 * @zero_hash:		Buffer to return the empty message digest.
 * @zero_hash_size:	Hash size of the empty message digest.
 * @zero_digest:	True if a pre-calculated digest was returned.
 */
static int get_empty_message_digest(
		struct hash_device_data *device_data,
		u8 *zero_hash, u32 *zero_hash_size, bool *zero_digest)
{
	int ret = 0;
	struct hash_ctx *ctx = device_data->current_ctx;
	*zero_digest = false;

	/* Caller is responsible for ctx != NULL. */

	if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) {
		if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
			memcpy(zero_hash, &zero_message_hash_sha1[0],
					SHA1_DIGEST_SIZE);
			*zero_hash_size = SHA1_DIGEST_SIZE;
			*zero_digest = true;
		} else if (HASH_ALGO_SHA256 ==
				ctx->config.algorithm) {
			memcpy(zero_hash, &zero_message_hash_sha256[0],
					SHA256_DIGEST_SIZE);
			*zero_hash_size = SHA256_DIGEST_SIZE;
			*zero_digest = true;
		} else {
			dev_err(device_data->dev, "[%s] "
					"Incorrect algorithm!", __func__);
			ret = -EINVAL;
			goto out;
		}
	} else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) {
		if (!ctx->keylen) {
			if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
				memcpy(zero_hash, &zero_message_hmac_sha1[0],
						SHA1_DIGEST_SIZE);
				*zero_hash_size = SHA1_DIGEST_SIZE;
				*zero_digest = true;
			} else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
				memcpy(zero_hash, &zero_message_hmac_sha256[0],
						SHA256_DIGEST_SIZE);
				*zero_hash_size = SHA256_DIGEST_SIZE;
				*zero_digest = true;
			} else {
				dev_err(device_data->dev, "[%s] "
						"Incorrect algorithm!",
						__func__);
				ret = -EINVAL;
				goto out;
			}
		} else {
			dev_dbg(device_data->dev, "[%s] Continue hash "
					"calculation, since HMAC key is "
					"available", __func__);
		}
	}
out:
	return ret;
}

/**
 * hash_disable_power - Request to disable power and clock.
 * @device_data:	Structure for the hash device.
 * @save_device_state:	If true, saves the current hw state.
 *
 * This function requests that power (regulator) and clock be disabled,
 * and can also save the current hw state.
 */
static int hash_disable_power(
		struct hash_device_data *device_data,
		bool save_device_state)
{
	int ret = 0;
	struct device *dev = device_data->dev;

	spin_lock(&device_data->power_state_lock);
	if (!device_data->power_state)
		goto out;

	if (save_device_state) {
		hash_save_state(device_data,
				&device_data->state);
		device_data->restore_dev_state = true;
	}

	clk_disable(device_data->clk);
	ret = regulator_disable(device_data->regulator);
	if (ret)
		dev_err(dev, "[%s] regulator_disable() failed!", __func__);

	device_data->power_state = false;

out:
	spin_unlock(&device_data->power_state_lock);

	return ret;
}

/**
 * hash_enable_power - Request to enable power and clock.
 * @device_data:		Structure for the hash device.
 * @restore_device_state:	If true, restores a previously saved hw state.
 *
 * This function requests that power (regulator) and clock be enabled,
 * and can also restore a previously saved hw state.
 */
static int hash_enable_power(
		struct hash_device_data *device_data,
		bool restore_device_state)
{
	int ret = 0;
	struct device *dev = device_data->dev;

	spin_lock(&device_data->power_state_lock);
	if (!device_data->power_state) {
		ret = regulator_enable(device_data->regulator);
		if (ret) {
			dev_err(dev, "[%s]: regulator_enable() failed!",
					__func__);
			goto out;
		}
		ret = clk_enable(device_data->clk);
		if (ret) {
			dev_err(dev, "[%s]: clk_enable() failed!",
					__func__);
			/* Do not clobber the clk_enable() error code */
			if (regulator_disable(device_data->regulator))
				dev_err(dev, "[%s]: regulator_disable() "
						"failed!", __func__);
			goto out;
		}
		device_data->power_state = true;
	}

	if (device_data->restore_dev_state) {
		if (restore_device_state) {
			device_data->restore_dev_state = false;
			hash_resume_state(device_data,
					&device_data->state);
		}
	}

out:
	spin_unlock(&device_data->power_state_lock);

	return ret;
}

/**
 * hash_get_device_data - Checks for an available hash device and returns it.
 * @ctx:		Structure for the hash context.
 * @device_data:	Structure for the hash device.
 *
 * This function checks for an available hash device and returns it to
 * the caller.
 * Note! The caller needs to release the device, calling up().
 */
static int hash_get_device_data(struct hash_ctx *ctx,
		struct hash_device_data **device_data)
{
	int ret;
	struct klist_iter device_iterator;
	struct klist_node *device_node;
	struct hash_device_data *local_device_data = NULL;

	/* Wait until a device is available */
	ret = down_interruptible(&driver_data.device_allocation);
	if (ret)
		return ret;	/* Interrupted */

	/* Select a device */
	klist_iter_init(&driver_data.device_list, &device_iterator);
	device_node = klist_next(&device_iterator);
	while (device_node) {
		local_device_data = container_of(device_node,
				struct hash_device_data, list_node);
		spin_lock(&local_device_data->ctx_lock);
		/* current_ctx allocates a device, NULL = unallocated */
		if (local_device_data->current_ctx) {
			device_node = klist_next(&device_iterator);
		} else {
			local_device_data->current_ctx = ctx;
			ctx->device = local_device_data;
			spin_unlock(&local_device_data->ctx_lock);
			break;
		}
		spin_unlock(&local_device_data->ctx_lock);
	}
	klist_iter_exit(&device_iterator);

	if (!device_node) {
		/*
		 * No free device found.
		 * Since we allocated a device with down_interruptible, this
		 * should not be able to happen.
		 * The number of available devices, which is contained in
		 * device_allocation, is therefore decremented by not doing
		 * an up(device_allocation).
		 */
		return -EBUSY;
	}

	*device_data = local_device_data;

	return 0;
}
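
/*
 * Device allocation bookkeeping: hash_get_device_data() takes one slot from
 * the driver_data.device_allocation semaphore (down_interruptible) and marks
 * the chosen device busy by setting current_ctx; release_hash_device() undoes
 * both. Every successful hash_get_device_data() call must therefore be paired
 * with exactly one release_hash_device() call, as the update/final functions
 * below do in their out: paths.
 */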
/**
 * hash_hw_write_key - Writes the key to the hardware registers.
 *
 * @device_data:	Structure for the hash device.
 * @key:		Key to be written.
 * @keylen:		The length of the key.
 *
 * Note! This function DOES NOT write to the NBLW registry, even though
 * specified in the hw design spec. Either due to incorrect info in the
 * spec or due to a bug in the hw.
 */
static void hash_hw_write_key(struct hash_device_data *device_data,
		const u8 *key, unsigned int keylen)
{
	u32 word = 0;
	int nwords = 1;

	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

	while (keylen >= 4) {
		u32 *key_word = (u32 *)key;

		HASH_SET_DIN(key_word, nwords);
		keylen -= 4;
		key += 4;
	}

	/* Take care of the remaining bytes in the last word */
	if (keylen) {
		word = 0;
		while (keylen) {
			word |= (key[keylen - 1] << (8 * (keylen - 1)));
			keylen--;
		}

		HASH_SET_DIN(&word, nwords);
	}

	while (device_data->base->str & HASH_STR_DCAL_MASK)
		cpu_relax();

	HASH_SET_DCAL;

	while (device_data->base->str & HASH_STR_DCAL_MASK)
		cpu_relax();
}
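
/*
 * The tail loop above packs the remaining 1-3 key bytes little-endian into
 * a single word. Example: with three bytes left, the iterations give
 *	word = key[2] << 16 | key[1] << 8 | key[0] << 0
 * which is then pushed into HASH_DIN as one word.
 */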
/**
 * init_hash_hw - Initialise the hash hardware for a new calculation.
 * @device_data:	Structure for the hash device.
 * @ctx:		The hash context.
 *
 * This function will enable the bits needed to clear and start a new
 * calculation.
 */
static int init_hash_hw(struct hash_device_data *device_data,
		struct hash_ctx *ctx)
{
	int ret = 0;

	ret = hash_setconfiguration(device_data, &ctx->config);
	if (ret) {
		dev_err(device_data->dev, "[%s] hash_setconfiguration() "
				"failed!", __func__);
		return ret;
	}

	hash_begin(device_data, ctx);

	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
		hash_hw_write_key(device_data, ctx->key, ctx->keylen);

	return ret;
}

/**
 * hash_get_nents - Return number of entries (nents) in scatterlist (sg).
 *
 * @sg:		Scatterlist.
 * @size:	Size in bytes.
 * @aligned:	True if sg data is aligned to work in DMA mode.
 */
static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned)
{
	int nents = 0;
	bool aligned_data = true;

	while (size > 0 && sg) {
		nents++;
		size -= sg->length;

		/* hash_set_dma_transfer will align last nent */
		if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE))
				|| (!IS_ALIGNED(sg->length,
						HASH_DMA_ALIGN_SIZE) &&
					size > 0))
			aligned_data = false;

		sg = sg_next(sg);
	}

	if (aligned)
		*aligned = aligned_data;

	if (size != 0)
		return -EFAULT;

	return nents;
}

/**
 * hash_dma_valid_data - Checks for DMA-valid sg data.
 * @sg:		Scatterlist.
 * @datasize:	Datasize in bytes.
 *
 * NOTE! This function checks for DMA-valid sg data, since the DMA engine
 * only accepts data sizes that are a whole number of words.
 */
static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
{
	bool aligned;

	/* Need to include at least one nent, else error */
	if (hash_get_nents(sg, datasize, &aligned) < 1)
		return false;

	return aligned;
}

/**
 * hash_init - Common hash init function for SHA1/SHA2 (SHA256).
 * @req:	The hash request for the job.
 *
 * Initialize structures.
 */
static int hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);

	if (!ctx->key)
		ctx->keylen = 0;

	memset(&req_ctx->state, 0, sizeof(struct hash_state));
	req_ctx->updated = 0;
	if (hash_mode == HASH_MODE_DMA) {
		if (req->nbytes < HASH_DMA_ALIGN_SIZE) {
			req_ctx->dma_mode = false; /* Don't use DMA */

			pr_debug(DEV_DBG_NAME " [%s] DMA mode, but direct "
					"to CPU mode for data size < %d",
					__func__, HASH_DMA_ALIGN_SIZE);
		} else {
			if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE &&
					hash_dma_valid_data(req->src,
						req->nbytes)) {
				req_ctx->dma_mode = true;
			} else {
				req_ctx->dma_mode = false;
				pr_debug(DEV_DBG_NAME " [%s] DMA mode, but use"
						" CPU mode for datalength < %d"
						" or non-aligned data, except "
						"in last nent", __func__,
						HASH_DMA_PERFORMANCE_MIN_SIZE);
			}
		}
	}
	return 0;
}
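
/*
 * Summary of the DMA/CPU decision made in hash_init() when the module is
 * loaded with hash_mode = HASH_MODE_DMA:
 *	nbytes < HASH_DMA_ALIGN_SIZE			-> CPU mode
 *	nbytes < HASH_DMA_PERFORMANCE_MIN_SIZE		-> CPU mode
 *	scatterlist not word-aligned
 *	(see hash_dma_valid_data())			-> CPU mode
 *	otherwise					-> DMA mode
 * In CPU mode the data is fed to HASH_DIN from hash_hw_update(); in DMA mode
 * all data is handed to the engine in hash_dma_final().
 */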
/**
 * hash_processblock - This function processes a single block of 512 bits (64
 *			bytes), word aligned, starting at message.
 * @device_data:	Structure for the hash device.
 * @message:		Block (512 bits) of message to be written to
 *			the HASH hardware.
 * @length:		Length of the block in bytes (a whole number of words).
 */
static void hash_processblock(
		struct hash_device_data *device_data,
		const u32 *message, int length)
{
	int len = length / HASH_BYTES_PER_WORD;
	/*
	 * NBLW bits. Reset the number of bits in last word (NBLW).
	 */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

	/*
	 * Write message data to the HASH_DIN register.
	 */
	HASH_SET_DIN(message, len);
}

/**
 * hash_messagepad - Pads a message and writes the NBLW bits.
 * @device_data:	Structure for the hash device.
 * @message:		Last word of a message.
 * @index_bytes:	The number of bytes in the last message.
 *
 * This function manages the final part of the digest calculation, when less
 * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
 */
static void hash_messagepad(struct hash_device_data *device_data,
		const u32 *message, u8 index_bytes)
{
	int nwords = 1;

	/*
	 * Clear hash str register, only clear NBLW
	 * since DCAL will be reset by hardware.
	 */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

	/* Main loop */
	while (index_bytes >= 4) {
		HASH_SET_DIN(message, nwords);
		index_bytes -= 4;
		message++;
	}

	if (index_bytes)
		HASH_SET_DIN(message, nwords);

	while (device_data->base->str & HASH_STR_DCAL_MASK)
		cpu_relax();

	/* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */
	HASH_SET_NBLW(index_bytes * 8);
	dev_dbg(device_data->dev, "[%s] DIN=0x%08x NBLW=%d", __func__,
			readl_relaxed(&device_data->base->din),
			(int)(readl_relaxed(&device_data->base->str) &
				HASH_STR_NBLW_MASK));
	HASH_SET_DCAL;
	dev_dbg(device_data->dev, "[%s] after dcal -> DIN=0x%08x NBLW=%d",
			__func__, readl_relaxed(&device_data->base->din),
			(int)(readl_relaxed(&device_data->base->str) &
				HASH_STR_NBLW_MASK));

	while (device_data->base->str & HASH_STR_DCAL_MASK)
		cpu_relax();
}
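
/*
 * Worked example for hash_messagepad(): with index_bytes = 7, the main loop
 * writes one full word and leaves index_bytes = 3, the tail write pushes the
 * last (partial) word, and NBLW is set to 3 * 8 = 24 so the hardware only
 * consumes 24 valid bits of that final word before DCAL starts the final
 * digest round.
 */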
/**
 * hash_incrementlength - Increments the length of the current message.
 * @ctx:	Hash request context.
 * @incr:	Number of bytes just processed.
 *
 * Overflow cannot occur, because conditions for overflow are checked in
 * hash_hw_update.
 */
static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr)
{
	ctx->state.length.low_word += incr;

	/* Check for wrap-around */
	if (ctx->state.length.low_word < incr)
		ctx->state.length.high_word++;
}
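
/*
 * The wrap-around test works because unsigned overflow is well defined:
 * e.g. low_word = 0xffffffff and incr = 2 gives low_word = 1, and
 * 1 < 2 reveals that the addition wrapped, so high_word is carried.
 */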
/**
 * hash_setconfiguration - Sets the required configuration for the hash
 *			hardware.
 * @device_data:	Structure for the hash device.
 * @config:		Pointer to a configuration structure.
 */
int hash_setconfiguration(struct hash_device_data *device_data,
		struct hash_config *config)
{
	int ret = 0;

	if (config->algorithm != HASH_ALGO_SHA1 &&
	    config->algorithm != HASH_ALGO_SHA256)
		return -EPERM;

	/*
	 * DATAFORM bits. Set the DATAFORM bits to 0b11, which means the data
	 * to be written to HASH_DIN is considered as 32 bits.
	 */
	HASH_SET_DATA_FORMAT(config->data_format);

	/*
	 * ALGO bit. Set to 0b1 for SHA-1 and 0b0 for SHA-256
	 */
	switch (config->algorithm) {
	case HASH_ALGO_SHA1:
		HASH_SET_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
		break;

	case HASH_ALGO_SHA256:
		HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
		break;

	default:
		dev_err(device_data->dev, "[%s] Incorrect algorithm.",
				__func__);
		return -EPERM;
	}

	/*
	 * MODE bit. This bit selects between HASH or HMAC mode for the
	 * selected algorithm. 0b0 = HASH and 0b1 = HMAC.
	 */
	if (HASH_OPER_MODE_HASH == config->oper_mode)
		HASH_CLEAR_BITS(&device_data->base->cr,
				HASH_CR_MODE_MASK);
	else if (HASH_OPER_MODE_HMAC == config->oper_mode) {
		HASH_SET_BITS(&device_data->base->cr,
				HASH_CR_MODE_MASK);
		if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) {
			/* Truncate key to blocksize */
			dev_dbg(device_data->dev, "[%s] LKEY set", __func__);
			HASH_SET_BITS(&device_data->base->cr,
					HASH_CR_LKEY_MASK);
		} else {
			dev_dbg(device_data->dev, "[%s] LKEY cleared",
					__func__);
			HASH_CLEAR_BITS(&device_data->base->cr,
					HASH_CR_LKEY_MASK);
		}
	} else {	/* Wrong hash mode */
		ret = -EPERM;
		dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
				__func__);
	}
	return ret;
}
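
/*
 * Resulting CR configuration, as set above, for the four supported
 * combinations (ALGO: 1 = SHA-1, 0 = SHA-256; MODE: 0 = HASH, 1 = HMAC;
 * LKEY set only for HMAC keys longer than HASH_BLOCK_SIZE):
 *	sha1:		ALGO=1 MODE=0
 *	sha256:		ALGO=0 MODE=0
 *	hmac(sha1):	ALGO=1 MODE=1 LKEY=(keylen > HASH_BLOCK_SIZE)
 *	hmac(sha256):	ALGO=0 MODE=1 LKEY=(keylen > HASH_BLOCK_SIZE)
 */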
/**
 * hash_begin - This routine resets some globals and initializes the hash
 * hardware.
 * @device_data:	Structure for the hash device.
 * @ctx:		Hash context.
 */
void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
{
	/* HW and SW initializations */
	/* Note: there is no need to initialize buffer and digest members */

	while (device_data->base->str & HASH_STR_DCAL_MASK)
		cpu_relax();

	/*
	 * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
	 * prepare the HASH accelerator to compute the message digest of a
	 * new message.
	 */
	HASH_INITIALIZE;

	/*
	 * NBLW bits. Reset the number of bits in last word (NBLW).
	 */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
}

/**
 * hash_process_data - Block-buffers the message and feeds complete 512-bit
 *			blocks to the hash hardware.
 * @device_data:	Structure for the hash device.
 * @ctx:		Hash context.
 * @req_ctx:		Request context, holding the partial-block buffer.
 * @msg_length:		Length of the data chunk to process.
 * @data_buffer:	The data chunk to process.
 * @buffer:		Partial-block buffer (req_ctx->state.buffer).
 * @index:		Number of bytes currently held in @buffer.
 */
int hash_process_data(
		struct hash_device_data *device_data,
		struct hash_ctx *ctx, struct hash_req_ctx *req_ctx,
		int msg_length, u8 *data_buffer, u8 *buffer, u8 *index)
{
	int ret = 0;
	u32 count;

	do {
		if ((*index + msg_length) < HASH_BLOCK_SIZE) {
			for (count = 0; count < msg_length; count++) {
				buffer[*index + count] =
					*(data_buffer + count);
			}
			*index += msg_length;
			msg_length = 0;
		} else {
			if (req_ctx->updated) {
				ret = hash_resume_state(device_data,
						&device_data->state);
				/* The state buffer is HASH_BLOCK_SIZE bytes */
				memmove(req_ctx->state.buffer,
						device_data->state.buffer,
						HASH_BLOCK_SIZE);
				if (ret) {
					dev_err(device_data->dev, "[%s] "
							"hash_resume_state()"
							" failed!", __func__);
					goto out;
				}
			} else {
				ret = init_hash_hw(device_data, ctx);
				if (ret) {
					dev_err(device_data->dev, "[%s] "
							"init_hash_hw()"
							" failed!", __func__);
					goto out;
				}
				req_ctx->updated = 1;
			}
			/*
			 * If 'data_buffer' is four byte aligned and
			 * local buffer does not have any data, we can
			 * write data directly from 'data_buffer' to
			 * HW peripheral, otherwise we first copy data
			 * to a local buffer
			 */
			if ((0 == (((u32)data_buffer) % 4))
					&& (0 == *index))
				hash_processblock(device_data,
						(const u32 *)
						data_buffer, HASH_BLOCK_SIZE);
			else {
				for (count = 0; count <
						(u32)(HASH_BLOCK_SIZE -
							*index);
						count++) {
					buffer[*index + count] =
						*(data_buffer + count);
				}
				hash_processblock(device_data,
						(const u32 *)buffer,
						HASH_BLOCK_SIZE);
			}
			hash_incrementlength(req_ctx, HASH_BLOCK_SIZE);
			data_buffer += (HASH_BLOCK_SIZE - *index);

			msg_length -= (HASH_BLOCK_SIZE - *index);
			*index = 0;

			ret = hash_save_state(device_data,
					&device_data->state);
			/* The state buffer is HASH_BLOCK_SIZE bytes */
			memmove(device_data->state.buffer,
					req_ctx->state.buffer,
					HASH_BLOCK_SIZE);
			if (ret) {
				dev_err(device_data->dev, "[%s] "
						"hash_save_state()"
						" failed!", __func__);
				goto out;
			}
		}
	} while (msg_length != 0);
out:
	return ret;
}
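
/*
 * Buffering example for hash_process_data(): with *index = 60 bytes already
 * buffered and msg_length = 10, the first 4 incoming bytes complete a 64-byte
 * block, which is written to the hardware via hash_processblock(); *index is
 * reset to 0 and the remaining 6 bytes are stored in the buffer on the next
 * loop iteration, waiting for more data or for the final padding in
 * hash_messagepad().
 */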
/**
 * hash_dma_final - The hash DMA final function for SHA1/SHA256.
 * @req:	The hash request for the job.
 */
static int hash_dma_final(struct ahash_request *req)
{
	int ret = 0;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct hash_device_data *device_data;
	u8 digest[SHA256_DIGEST_SIZE];
	int bytes_written = 0;

	ret = hash_get_device_data(ctx, &device_data);
	if (ret)
		return ret;

	dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx);

	if (req_ctx->updated) {
		ret = hash_resume_state(device_data, &device_data->state);
		if (ret) {
			dev_err(device_data->dev, "[%s] hash_resume_state() "
					"failed!", __func__);
			goto out;
		}
	}

	if (!req_ctx->updated) {
		ret = hash_setconfiguration(device_data, &ctx->config);
		if (ret) {
			dev_err(device_data->dev, "[%s] "
					"hash_setconfiguration() failed!",
					__func__);
			goto out;
		}

		/* Enable DMA input */
		if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode) {
			HASH_CLEAR_BITS(&device_data->base->cr,
					HASH_CR_DMAE_MASK);
		} else {
			HASH_SET_BITS(&device_data->base->cr,
					HASH_CR_DMAE_MASK);
			HASH_SET_BITS(&device_data->base->cr,
					HASH_CR_PRIVN_MASK);
		}

		HASH_INITIALIZE;

		if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
			hash_hw_write_key(device_data, ctx->key, ctx->keylen);

		/* Number of bits in last word = (nbytes * 8) % 32 */
		HASH_SET_NBLW((req->nbytes * 8) % 32);
		req_ctx->updated = 1;
	}

	/* Store the nents in the dma struct. */
	ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL);
	if (ctx->device->dma.nents < 1) {
		dev_err(device_data->dev, "[%s] "
				"hash_get_nents() failed!", __func__);
		ret = -EFAULT;
		goto out;
	}

	bytes_written = hash_dma_write(ctx, req->src, req->nbytes);
	if (bytes_written != req->nbytes) {
		dev_err(device_data->dev, "[%s] "
				"hash_dma_write() failed!", __func__);
		ret = bytes_written < 0 ? bytes_written : -EFAULT;
		goto out;
	}

	wait_for_completion(&ctx->device->dma.complete);
	hash_dma_done(ctx);

	while (device_data->base->str & HASH_STR_DCAL_MASK)
		cpu_relax();

	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
		unsigned int keylen = ctx->keylen;
		u8 *key = ctx->key;

		dev_dbg(device_data->dev, "[%s] keylen: %d", __func__,
				ctx->keylen);
		hash_hw_write_key(device_data, key, keylen);
	}

	hash_get_digest(device_data, digest, ctx->config.algorithm);
	memcpy(req->result, digest, ctx->digestsize);

out:
	release_hash_device(device_data);

	/*
	 * Allocated in setkey, and only used in HMAC. Clear the pointer so
	 * that a later final cannot free it a second time.
	 */
	kfree(ctx->key);
	ctx->key = NULL;

	return ret;
}

/**
 * hash_hw_final - The final hash calculation function.
 * @req:	The hash request for the job.
 */
int hash_hw_final(struct ahash_request *req)
{
	int ret = 0;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct hash_device_data *device_data;
	u8 digest[SHA256_DIGEST_SIZE];

	ret = hash_get_device_data(ctx, &device_data);
	if (ret)
		return ret;

	dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx);

	if (req_ctx->updated) {
		ret = hash_resume_state(device_data, &device_data->state);
		if (ret) {
			dev_err(device_data->dev, "[%s] hash_resume_state() "
					"failed!", __func__);
			goto out;
		}
	} else if (req->nbytes == 0 && ctx->keylen == 0) {
		u8 zero_hash[SHA256_DIGEST_SIZE];
		u32 zero_hash_size = 0;
		bool zero_digest = false;

		/*
		 * Use a pre-calculated empty message digest
		 * (workaround since hw returns zeroes, hw bug!?)
		 */
		ret = get_empty_message_digest(device_data, &zero_hash[0],
				&zero_hash_size, &zero_digest);
		if (!ret && likely(zero_hash_size == ctx->digestsize) &&
				zero_digest) {
			memcpy(req->result, &zero_hash[0], ctx->digestsize);
			goto out;
		} else if (!ret && !zero_digest) {
			dev_dbg(device_data->dev, "[%s] HMAC zero msg with "
					"key, continue...", __func__);
		} else {
			dev_err(device_data->dev, "[%s] ret=%d, or wrong "
					"digest size? %s", __func__, ret,
					(zero_hash_size == ctx->digestsize) ?
					"true" : "false");
			/* Return an error, also when ret is still zero */
			if (!ret)
				ret = -EINVAL;
			goto out;
		}
	} else if (req->nbytes == 0 && ctx->keylen > 0) {
		dev_err(device_data->dev, "[%s] Empty message with "
				"keylength > 0, NOT supported.", __func__);
		ret = -EPERM;
		goto out;
	}

	if (!req_ctx->updated) {
		ret = init_hash_hw(device_data, ctx);
		if (ret) {
			dev_err(device_data->dev, "[%s] init_hash_hw() "
					"failed!", __func__);
			goto out;
		}
	}

	if (req_ctx->state.index) {
		hash_messagepad(device_data, req_ctx->state.buffer,
				req_ctx->state.index);
	} else {
		HASH_SET_DCAL;
		while (device_data->base->str & HASH_STR_DCAL_MASK)
			cpu_relax();
	}

	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
		unsigned int keylen = ctx->keylen;
		u8 *key = ctx->key;

		dev_dbg(device_data->dev, "[%s] keylen: %d", __func__,
				ctx->keylen);
		hash_hw_write_key(device_data, key, keylen);
	}

	hash_get_digest(device_data, digest, ctx->config.algorithm);
	memcpy(req->result, digest, ctx->digestsize);

out:
	release_hash_device(device_data);

	/*
	 * Allocated in setkey, and only used in HMAC. Clear the pointer so
	 * that a later final cannot free it a second time.
	 */
	kfree(ctx->key);
	ctx->key = NULL;

	return ret;
}
/**
 * hash_hw_update - Updates current HASH computation hashing another part of
 *		the message.
 * @req:	The hash request for the job.
 */
int hash_hw_update(struct ahash_request *req)
{
	int ret = 0;
	u8 index = 0;
	u8 *buffer;
	struct hash_device_data *device_data;
	u8 *data_buffer;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_hash_walk walk;
	int msg_length = crypto_hash_walk_first(req, &walk);

	/* Empty message ("") is valid input */
	if (msg_length == 0)
		return ret;

	index = req_ctx->state.index;
	buffer = (u8 *)req_ctx->state.buffer;

	/* Check if ctx->state.length + msg_length overflows */
	if (msg_length > (req_ctx->state.length.low_word + msg_length) &&
			HASH_HIGH_WORD_MAX_VAL ==
			req_ctx->state.length.high_word) {
		pr_err(DEV_DBG_NAME " [%s] HASH_MSG_LENGTH_OVERFLOW!",
				__func__);
		return -EPERM;
	}

	ret = hash_get_device_data(ctx, &device_data);
	if (ret)
		return ret;

	/* Main loop */
	while (0 != msg_length) {
		data_buffer = walk.data;
		ret = hash_process_data(device_data, ctx, req_ctx, msg_length,
				data_buffer, buffer, &index);

		if (ret) {
			dev_err(device_data->dev, "[%s] hash_process_data() "
					"failed!", __func__);
			goto out;
		}

		msg_length = crypto_hash_walk_done(&walk, 0);
	}

	req_ctx->state.index = index;
	dev_dbg(device_data->dev, "[%s] indata length=%d, bin=%d)",
			__func__, req_ctx->state.index,
			req_ctx->state.bit_index);

out:
	release_hash_device(device_data);

	return ret;
}
/**
 * hash_resume_state - Function that resumes the state of a calculation.
 * @device_data:	Pointer to the device structure.
 * @device_state:	The state to be restored in the hash hardware.
 */
int hash_resume_state(struct hash_device_data *device_data,
		const struct hash_state *device_state)
{
	u32 temp_cr;
	s32 count;
	int oper_mode = HASH_OPER_MODE_HASH;

	if (NULL == device_state) {
		dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
				__func__);
		return -EPERM;
	}

	/* Check correctness of index and length members */
	if (device_state->index > HASH_BLOCK_SIZE
			|| (device_state->length.low_word %
				HASH_BLOCK_SIZE) != 0) {
		dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
				__func__);
		return -EPERM;
	}

	/*
	 * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
	 * prepare the HASH accelerator to compute the message digest of a
	 * new message.
	 */
	HASH_INITIALIZE;

	temp_cr = device_state->temp_cr;
	writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr);

	if (device_data->base->cr & HASH_CR_MODE_MASK)
		oper_mode = HASH_OPER_MODE_HMAC;
	else
		oper_mode = HASH_OPER_MODE_HASH;

	for (count = 0; count < HASH_CSR_COUNT; count++) {
		/* Only the HMAC context uses the CSR registers from 36 up */
		if ((count >= 36) && (oper_mode == HASH_OPER_MODE_HASH))
			break;

		writel_relaxed(device_state->csr[count],
				&device_data->base->csrx[count]);
	}

	writel_relaxed(device_state->csfull, &device_data->base->csfull);
	writel_relaxed(device_state->csdatain, &device_data->base->csdatain);

	writel_relaxed(device_state->str_reg, &device_data->base->str);
	writel_relaxed(temp_cr, &device_data->base->cr);

	return 0;
}

/**
 * hash_save_state - Function that saves the state of hardware.
 * @device_data:	Pointer to the device structure.
 * @device_state:	The structure where the hardware state should be saved.
 */
int hash_save_state(struct hash_device_data *device_data,
		struct hash_state *device_state)
{
	u32 temp_cr;
	u32 count;
	int oper_mode = HASH_OPER_MODE_HASH;

	if (NULL == device_state) {
		dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
				__func__);
		return -ENOTSUPP;
	}

	/*
	 * Write dummy value to force digest intermediate calculation. This
	 * actually makes sure that there isn't any ongoing calculation in the
	 * hardware.
	 */
	while (device_data->base->str & HASH_STR_DCAL_MASK)
		cpu_relax();

	temp_cr = readl_relaxed(&device_data->base->cr);

	device_state->str_reg = readl_relaxed(&device_data->base->str);

	device_state->din_reg = readl_relaxed(&device_data->base->din);

	if (device_data->base->cr & HASH_CR_MODE_MASK)
		oper_mode = HASH_OPER_MODE_HMAC;
	else
		oper_mode = HASH_OPER_MODE_HASH;

	for (count = 0; count < HASH_CSR_COUNT; count++) {
		/* Only the HMAC context uses the CSR registers from 36 up */
		if ((count >= 36) && (oper_mode == HASH_OPER_MODE_HASH))
			break;

		device_state->csr[count] =
			readl_relaxed(&device_data->base->csrx[count]);
	}

	device_state->csfull = readl_relaxed(&device_data->base->csfull);
	device_state->csdatain = readl_relaxed(&device_data->base->csdatain);

	device_state->temp_cr = temp_cr;

	return 0;
}
/**
 * hash_check_hw - This routine checks for peripheral Ids and PCell Ids.
 * @device_data:	Structure for the hash device.
 */
int hash_check_hw(struct hash_device_data *device_data)
{
	/* Checking Peripheral Ids */
	if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0)
		&& HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1)
		&& HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2)
		&& HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3)
		&& HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0)
		&& HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1)
		&& HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2)
		&& HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)
	   ) {
		return 0;
	}

	dev_err(device_data->dev, "[%s] HASH_UNSUPPORTED_HW!",
			__func__);
	return -ENOTSUPP;
}

/**
 * hash_get_digest - Gets the digest.
 * @device_data:	Pointer to the device structure.
 * @digest:		User allocated byte array for the calculated digest.
 * @algorithm:		The algorithm in use.
 */
void hash_get_digest(struct hash_device_data *device_data,
		u8 *digest, int algorithm)
{
	u32 temp_hx_val, count;
	int loop_ctr;

	if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) {
		dev_err(device_data->dev, "[%s] Incorrect algorithm %d",
				__func__, algorithm);
		return;
	}

	if (algorithm == HASH_ALGO_SHA1)
		loop_ctr = SHA1_DIGEST_SIZE / sizeof(u32);
	else
		loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32);

	dev_dbg(device_data->dev, "[%s] digest array:(0x%x)",
			__func__, (u32) digest);

	/* Copy result into digest array */
	for (count = 0; count < loop_ctr; count++) {
		temp_hx_val = readl_relaxed(&device_data->base->hx[count]);
		digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF);
		digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF);
		digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF);
		digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF);
	}
}
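
/*
 * Byte-order example for hash_get_digest(): each 32-bit HX register is
 * stored big-endian into the byte array, so a register value of 0xda39a3ee
 * becomes digest[0..3] = da 39 a3 ee, which matches the conventional hex
 * printout of a SHA-1/SHA-256 digest.
 */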
/**
 * ahash_update - The hash update function for SHA1/SHA2 (SHA256).
 * @req:	The hash request for the job.
 */
static int ahash_update(struct ahash_request *req)
{
	int ret = 0;
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);

	if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode)
		ret = hash_hw_update(req);
	/* Skip update for DMA, all data will be passed to DMA in final */

	if (ret) {
		pr_err(DEV_DBG_NAME " [%s] hash_hw_update() failed!",
				__func__);
	}

	return ret;
}

/**
 * ahash_final - The hash final function for SHA1/SHA2 (SHA256).
 * @req:	The hash request for the job.
 */
static int ahash_final(struct ahash_request *req)
{
	int ret = 0;
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);

	pr_debug(DEV_DBG_NAME " [%s] data size: %d", __func__, req->nbytes);

	if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode)
		ret = hash_dma_final(req);
	else
		ret = hash_hw_final(req);

	if (ret) {
		pr_err(DEV_DBG_NAME " [%s] hash_hw/dma_final() failed",
				__func__);
	}

	return ret;
}

static int hash_setkey(struct crypto_ahash *tfm,
		const u8 *key, unsigned int keylen, int alg)
{
	int ret = 0;
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	/*
	 * Freed in final. Free any previous key first, in case setkey is
	 * called more than once before a final.
	 */
	kfree(ctx->key);
	ctx->key = kmemdup(key, keylen, GFP_KERNEL);
	if (!ctx->key) {
		pr_err(DEV_DBG_NAME " [%s] Failed to allocate ctx->key "
				"for %d\n", __func__, alg);
		return -ENOMEM;
	}

	ctx->keylen = keylen;

	return ret;
}

static int ahash_sha1_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = HASH_ALGO_SHA1;
	ctx->config.oper_mode = HASH_OPER_MODE_HASH;
	ctx->digestsize = SHA1_DIGEST_SIZE;

	return hash_init(req);
}

static int ahash_sha256_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = HASH_ALGO_SHA256;
	ctx->config.oper_mode = HASH_OPER_MODE_HASH;
	ctx->digestsize = SHA256_DIGEST_SIZE;

	return hash_init(req);
}

static int ahash_sha1_digest(struct ahash_request *req)
{
	int ret2, ret1;

	ret1 = ahash_sha1_init(req);
	if (ret1)
		goto out;

	ret1 = ahash_update(req);
	ret2 = ahash_final(req);

out:
	return ret1 ? ret1 : ret2;
}

static int ahash_sha256_digest(struct ahash_request *req)
{
	int ret2, ret1;

	ret1 = ahash_sha256_init(req);
	if (ret1)
		goto out;

	ret1 = ahash_update(req);
	ret2 = ahash_final(req);

out:
	return ret1 ? ret1 : ret2;
}

static int hmac_sha1_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = HASH_ALGO_SHA1;
	ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
	ctx->digestsize = SHA1_DIGEST_SIZE;

	return hash_init(req);
}

static int hmac_sha256_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = HASH_ALGO_SHA256;
	ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
	ctx->digestsize = SHA256_DIGEST_SIZE;

	return hash_init(req);
}

static int hmac_sha1_digest(struct ahash_request *req)
{
	int ret2, ret1;

	ret1 = hmac_sha1_init(req);
	if (ret1)
		goto out;

	ret1 = ahash_update(req);
	ret2 = ahash_final(req);

out:
	return ret1 ? ret1 : ret2;
}

static int hmac_sha256_digest(struct ahash_request *req)
{
	int ret2, ret1;

	ret1 = hmac_sha256_init(req);
	if (ret1)
		goto out;

	ret1 = ahash_update(req);
	ret2 = ahash_final(req);

out:
	return ret1 ? ret1 : ret2;
}

static int hmac_sha1_setkey(struct crypto_ahash *tfm,
		const u8 *key, unsigned int keylen)
{
	return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1);
}

static int hmac_sha256_setkey(struct crypto_ahash *tfm,
		const u8 *key, unsigned int keylen)
{
	return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256);
}

struct hash_algo_template {
	struct hash_config conf;
	struct ahash_alg hash;
};

static int hash_cra_init(struct crypto_tfm *tfm)
{
	struct hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct hash_algo_template *hash_alg;

	hash_alg = container_of(__crypto_ahash_alg(alg),
			struct hash_algo_template,
			hash);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
			sizeof(struct hash_req_ctx));

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = hash_alg->conf.algorithm;
	ctx->config.oper_mode = hash_alg->conf.oper_mode;

	ctx->digestsize = hash_alg->hash.halg.digestsize;

	return 0;
}
static struct hash_algo_template hash_algs[] = {
	{
		.conf.algorithm = HASH_ALGO_SHA1,
		.conf.oper_mode = HASH_OPER_MODE_HASH,
		.hash = {
			.init = hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = ahash_sha1_digest,
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-ux500",
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_type = &crypto_ahash_type,
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.conf.algorithm = HASH_ALGO_SHA256,
		.conf.oper_mode = HASH_OPER_MODE_HASH,
		.hash = {
			.init = hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = ahash_sha256_digest,
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-ux500",
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_type = &crypto_ahash_type,
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.conf.algorithm = HASH_ALGO_SHA1,
		.conf.oper_mode = HASH_OPER_MODE_HMAC,
		.hash = {
			.init = hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = hmac_sha1_digest,
			.setkey = hmac_sha1_setkey,
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-ux500",
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_type = &crypto_ahash_type,
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.conf.algorithm = HASH_ALGO_SHA256,
		.conf.oper_mode = HASH_OPER_MODE_HMAC,
		.hash = {
			.init = hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = hmac_sha256_digest,
			.setkey = hmac_sha256_setkey,
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-ux500",
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_type = &crypto_ahash_type,
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	}
};
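
/*
 * Usage sketch (illustrative, not part of this driver): a kernel user would
 * reach the algorithms above through the generic ahash API, e.g.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha1", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	ahash_request_set_callback(req, 0, NULL, NULL);
 *	ahash_request_set_crypt(req, sg, result, nbytes);
 *	crypto_ahash_digest(req);
 *
 * The crypto core can then select the "sha1-ux500" implementation registered
 * below when resolving "sha1".
 */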
  1390. /**
  1391. * hash_algs_register_all -
  1392. */
static int ahash_algs_register_all(struct hash_device_data *device_data)
{
	int ret;
	int i;
	int count;

	for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
		ret = crypto_register_ahash(&hash_algs[i].hash);
		if (ret) {
			count = i;
			dev_err(device_data->dev, "[%s] alg registration failed",
				hash_algs[i].hash.halg.base.cra_driver_name);
			goto unreg;
		}
	}
	return 0;
unreg:
	for (i = 0; i < count; i++)
		crypto_unregister_ahash(&hash_algs[i].hash);
	return ret;
}
/**
 * ahash_algs_unregister_all - Unregister all supported hash algorithms.
 * @device_data: Structure for the hash device.
 */
static void ahash_algs_unregister_all(struct hash_device_data *device_data)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
		crypto_unregister_ahash(&hash_algs[i].hash);
}
/**
 * ux500_hash_probe - Function that probes the hash hardware.
 * @pdev: The platform device.
 */
static int ux500_hash_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct resource *res = NULL;
	struct hash_device_data *device_data;
	struct device *dev = &pdev->dev;

	/* Probe runs in process context, so GFP_KERNEL is sufficient. */
	device_data = kzalloc(sizeof(struct hash_device_data), GFP_KERNEL);
	if (!device_data) {
		dev_dbg(dev, "[%s] kzalloc() failed!", __func__);
		ret = -ENOMEM;
		goto out;
	}
	device_data->dev = dev;
	device_data->current_ctx = NULL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_dbg(dev, "[%s] platform_get_resource() failed!", __func__);
		ret = -ENODEV;
		goto out_kfree;
	}

	res = request_mem_region(res->start, resource_size(res), pdev->name);
	if (res == NULL) {
		dev_dbg(dev, "[%s] request_mem_region() failed!", __func__);
		ret = -EBUSY;
		goto out_kfree;
	}

	device_data->base = ioremap(res->start, resource_size(res));
	if (!device_data->base) {
		dev_err(dev, "[%s] ioremap() failed!", __func__);
		ret = -ENOMEM;
		goto out_free_mem;
	}

	spin_lock_init(&device_data->ctx_lock);
	spin_lock_init(&device_data->power_state_lock);

	/* Enable power for HASH1 hardware block */
	device_data->regulator = regulator_get(dev, "v-ape");
	if (IS_ERR(device_data->regulator)) {
		dev_err(dev, "[%s] regulator_get() failed!", __func__);
		ret = PTR_ERR(device_data->regulator);
		device_data->regulator = NULL;
		goto out_unmap;
	}

	/* Enable the clock for HASH1 hardware block */
	device_data->clk = clk_get(dev, NULL);
	if (IS_ERR(device_data->clk)) {
		dev_err(dev, "[%s] clk_get() failed!", __func__);
		ret = PTR_ERR(device_data->clk);
		goto out_regulator;
	}

	/* Enable device power (and clock) */
	ret = hash_enable_power(device_data, false);
	if (ret) {
		dev_err(dev, "[%s]: hash_enable_power() failed!", __func__);
		goto out_clk;
	}

	ret = hash_check_hw(device_data);
	if (ret) {
		dev_err(dev, "[%s] hash_check_hw() failed!", __func__);
		goto out_power;
	}

	if (hash_mode == HASH_MODE_DMA)
		hash_dma_setup_channel(device_data, dev);

	platform_set_drvdata(pdev, device_data);

	/* Put the new device into the device list... */
	klist_add_tail(&device_data->list_node, &driver_data.device_list);
	/* ... and signal that a new device is available. */
	up(&driver_data.device_allocation);
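
	/*
	 * Register the algorithms only after the device has been made
	 * available above: registration can trigger the crypto self-tests,
	 * which need a device to allocate.
	 */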
	ret = ahash_algs_register_all(device_data);
	if (ret) {
		dev_err(dev, "[%s] ahash_algs_register_all() failed!",
			__func__);
		goto out_power;
	}

	dev_info(dev, "[%s] successfully probed\n", __func__);
	return 0;

out_power:
	hash_disable_power(device_data, false);

out_clk:
	clk_put(device_data->clk);

out_regulator:
	regulator_put(device_data->regulator);

out_unmap:
	iounmap(device_data->base);

out_free_mem:
	release_mem_region(res->start, resource_size(res));

out_kfree:
	kfree(device_data);
out:
	return ret;
}
/**
 * ux500_hash_remove - Function that removes the hash device from the platform.
 * @pdev: The platform device.
 */
static int ux500_hash_remove(struct platform_device *pdev)
{
	struct resource *res;
	struct hash_device_data *device_data;
	struct device *dev = &pdev->dev;

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(dev, "[%s]: platform_get_drvdata() failed!", __func__);
		return -ENOMEM;
	}

	/* Try to decrease the number of available devices. */
	if (down_trylock(&driver_data.device_allocation))
		return -EBUSY;

	/* Check that the device is free */
	spin_lock(&device_data->ctx_lock);
	/* current_ctx allocates a device, NULL = unallocated */
	if (device_data->current_ctx) {
		/* The device is busy */
		spin_unlock(&device_data->ctx_lock);
		/* Return the device to the pool. */
		up(&driver_data.device_allocation);
		return -EBUSY;
	}
	spin_unlock(&device_data->ctx_lock);

	/* Remove the device from the list */
	if (klist_node_attached(&device_data->list_node))
		klist_remove(&device_data->list_node);

	/* If this was the last device, remove the services */
	if (list_empty(&driver_data.device_list.k_list))
		ahash_algs_unregister_all(device_data);

	if (hash_disable_power(device_data, false))
		dev_err(dev, "[%s]: hash_disable_power() failed", __func__);

	clk_put(device_data->clk);
	regulator_put(device_data->regulator);
	iounmap(device_data->base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		release_mem_region(res->start, resource_size(res));

	kfree(device_data);

	return 0;
}
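
/*
 * The shutdown path mirrors ux500_hash_remove() but never bails out:
 * the device is claimed (if it was idle) and the hardware is always
 * unmapped and powered down.
 */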
/**
 * ux500_hash_shutdown - Function that shuts down the hash device.
 * @pdev: The platform device.
 */
static void ux500_hash_shutdown(struct platform_device *pdev)
{
	struct resource *res = NULL;
	struct hash_device_data *device_data;

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
			__func__);
		return;
	}

	/* Check that the device is free */
	spin_lock(&device_data->ctx_lock);
	/* current_ctx allocates a device, NULL = unallocated */
	if (!device_data->current_ctx) {
		if (down_trylock(&driver_data.device_allocation))
			dev_dbg(&pdev->dev, "[%s]: Hash still in use! "
				"Shutting down anyway...", __func__);
		/*
		 * Allocate the device by setting current_ctx to a dummy
		 * non-NULL value, so that it cannot be handed out while
		 * we are shutting down.
		 */
		device_data->current_ctx++;
	}
	spin_unlock(&device_data->ctx_lock);

	/* Remove the device from the list */
	if (klist_node_attached(&device_data->list_node))
		klist_remove(&device_data->list_node);

	/* If this was the last device, remove the services */
	if (list_empty(&driver_data.device_list.k_list))
		ahash_algs_unregister_all(device_data);

	iounmap(device_data->base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		release_mem_region(res->start, resource_size(res));

	if (hash_disable_power(device_data, false))
		dev_err(&pdev->dev, "[%s] hash_disable_power() failed",
			__func__);
}
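
/*
 * Suspend marks an idle device as busy by bumping current_ctx from NULL
 * to a dummy non-NULL pointer (NULL + 1). The matching "++ on a NULL
 * temp_ctx" below recreates that dummy value, so resume can tell a
 * device that was suspended while idle apart from one that was in the
 * middle of a transform.
 */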
/**
 * ux500_hash_suspend - Function that suspends the hash device.
 * @pdev: The platform device.
 * @state: The power state (unused).
 */
static int ux500_hash_suspend(struct platform_device *pdev, pm_message_t state)
{
	int ret;
	struct hash_device_data *device_data;
	struct hash_ctx *temp_ctx = NULL;

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
			__func__);
		return -ENOMEM;
	}

	/* If the device is free, claim it with the dummy context value. */
	spin_lock(&device_data->ctx_lock);
	if (!device_data->current_ctx)
		device_data->current_ctx++;
	spin_unlock(&device_data->ctx_lock);

	if (device_data->current_ctx == ++temp_ctx) {
		/* The device was idle: take it out of the available pool. */
		if (down_interruptible(&driver_data.device_allocation))
			dev_dbg(&pdev->dev, "[%s]: down_interruptible() "
				"failed", __func__);
		ret = hash_disable_power(device_data, false);
	} else {
		/* A transform is in progress: save the device state too. */
		ret = hash_disable_power(device_data, true);
	}

	if (ret)
		dev_err(&pdev->dev, "[%s]: hash_disable_power()", __func__);

	return ret;
}
/**
 * ux500_hash_resume - Function that resumes the hash device.
 * @pdev: The platform device.
 */
static int ux500_hash_resume(struct platform_device *pdev)
{
	int ret = 0;
	struct hash_device_data *device_data;
	struct hash_ctx *temp_ctx = NULL;

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
			__func__);
		return -ENOMEM;
	}

	/* If the context is the suspend dummy, mark the device free again. */
	spin_lock(&device_data->ctx_lock);
	if (device_data->current_ctx == ++temp_ctx)
		device_data->current_ctx = NULL;
	spin_unlock(&device_data->ctx_lock);

	if (!device_data->current_ctx)
		up(&driver_data.device_allocation);
	else
		ret = hash_enable_power(device_data, true);

	if (ret)
		dev_err(&pdev->dev, "[%s]: hash_enable_power() failed!",
			__func__);

	return ret;
}
static struct platform_driver hash_driver = {
	.probe = ux500_hash_probe,
	.remove = ux500_hash_remove,
	.shutdown = ux500_hash_shutdown,
	.suspend = ux500_hash_suspend,
	.resume = ux500_hash_resume,
	.driver = {
		.owner = THIS_MODULE,
		.name = "hash1",
	}
};
/**
 * ux500_hash_mod_init - The kernel module init function.
 */
static int __init ux500_hash_mod_init(void)
{
	klist_init(&driver_data.device_list, NULL, NULL);

	/* Initialize the semaphore to 0 devices (locked state) */
	sema_init(&driver_data.device_allocation, 0);

	return platform_driver_register(&hash_driver);
}
/**
 * ux500_hash_mod_fini - The kernel module exit function.
 */
static void __exit ux500_hash_mod_fini(void)
{
	platform_driver_unregister(&hash_driver);
}
module_init(ux500_hash_mod_init);
module_exit(ux500_hash_mod_fini);

MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
MODULE_LICENSE("GPL");

MODULE_ALIAS("sha1-all");
MODULE_ALIAS("sha256-all");
MODULE_ALIAS("hmac-sha1-all");
MODULE_ALIAS("hmac-sha256-all");