/*
 * Cryptographic API.
 * Support for Nomadik hardware crypto engine.
 * Copyright (C) ST-Ericsson SA 2010
 * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson
 * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson
 * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
 * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
 * Author: Andreas Westin <andreas.westin@stericsson.com> for ST-Ericsson.
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/klist.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/crypto.h>

#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/bitops.h>

#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>

#include <linux/platform_data/crypto-ux500.h>

#include "hash_alg.h"

#define DEV_DBG_NAME "hashX hashX:"

static int hash_mode;
module_param(hash_mode, int, 0);
MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1");
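
/*
 * Usage note: hash_mode is a load-time module parameter (perm 0, so it is
 * not exposed in sysfs); switching between CPU and DMA mode therefore
 * requires reloading the module, e.g. (assuming the built module name)
 * "modprobe ux500_hash hash_mode=1".
 */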

/**
 * Pre-calculated empty message digests.
 */
static u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = {
	0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
	0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
	0xaf, 0xd8, 0x07, 0x09
};

static u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = {
	0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
	0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
	0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
	0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
};

/* HMAC-SHA1, no key */
static u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = {
	0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08,
	0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63,
	0x70, 0x69, 0x0e, 0x1d
};

/* HMAC-SHA256, no key */
static u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = {
	0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec,
	0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5,
	0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53,
	0xc6, 0xc7, 0x12, 0x14, 0x42, 0x92, 0xc5, 0xad
};
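
/*
 * These constants match the standard test vectors: SHA-1("") and
 * SHA-256(""), plus HMAC-SHA-1 and HMAC-SHA-256 of the empty message
 * computed with a zero-length key.
 */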

/**
 * struct hash_driver_data - data specific to the driver.
 *
 * @device_list: A list of registered devices to choose from.
 * @device_allocation: A semaphore initialized with number of devices.
 */
struct hash_driver_data {
	struct klist device_list;
	struct semaphore device_allocation;
};

static struct hash_driver_data driver_data;

/* Declaration of functions */

/**
 * hash_messagepad - Pads a message and writes the NBLW bits.
 * @device_data: Structure for the hash device.
 * @message: Last word of a message.
 * @index_bytes: The number of bytes in the last message.
 *
 * This function manages the final part of the digest calculation, when less
 * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
 */
static void hash_messagepad(struct hash_device_data *device_data,
			    const u32 *message, u8 index_bytes);

/**
 * release_hash_device - Releases a previously allocated hash device.
 * @device_data: Structure for the hash device.
 */
static void release_hash_device(struct hash_device_data *device_data)
{
	spin_lock(&device_data->ctx_lock);
	device_data->current_ctx->device = NULL;
	device_data->current_ctx = NULL;
	spin_unlock(&device_data->ctx_lock);

	/*
	 * The down_interruptible part for this semaphore is called in
	 * hash_get_device_data.
	 */
	up(&driver_data.device_allocation);
}

static void hash_dma_setup_channel(struct hash_device_data *device_data,
				   struct device *dev)
{
	struct hash_platform_data *platform_data = dev->platform_data;
	struct dma_slave_config conf = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = device_data->phybase + HASH_DMA_FIFO,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
		.dst_maxburst = 16,
	};

	dma_cap_zero(device_data->dma.mask);
	dma_cap_set(DMA_SLAVE, device_data->dma.mask);

	device_data->dma.cfg_mem2hash = platform_data->mem_to_engine;
	device_data->dma.chan_mem2hash =
		dma_request_channel(device_data->dma.mask,
				    platform_data->dma_filter,
				    device_data->dma.cfg_mem2hash);
	dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf);

	init_completion(&device_data->dma.complete);
}

static void hash_dma_callback(void *data)
{
	struct hash_ctx *ctx = (struct hash_ctx *) data;

	complete(&ctx->device->dma.complete);
}

static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
				 int len, enum dma_data_direction direction)
{
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *channel = NULL;
	dma_cookie_t cookie;

	if (direction != DMA_TO_DEVICE) {
		dev_err(ctx->device->dev, "[%s] Invalid DMA direction",
			__func__);
		return -EFAULT;
	}

	sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE);

	channel = ctx->device->dma.chan_mem2hash;
	ctx->device->dma.sg = sg;
	ctx->device->dma.sg_len = dma_map_sg(channel->device->dev,
			ctx->device->dma.sg, ctx->device->dma.nents,
			direction);

	if (!ctx->device->dma.sg_len) {
		dev_err(ctx->device->dev,
			"[%s]: Could not map the sg list (TO_DEVICE)",
			__func__);
		return -EFAULT;
	}

	dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
		"(TO_DEVICE)", __func__);
	desc = channel->device->device_prep_slave_sg(channel,
			ctx->device->dma.sg, ctx->device->dma.sg_len,
			direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT, NULL);
	if (!desc) {
		dev_err(ctx->device->dev,
			"[%s]: device_prep_slave_sg() failed!", __func__);
		return -EFAULT;
	}

	desc->callback = hash_dma_callback;
	desc->callback_param = ctx;
	cookie = desc->tx_submit(desc);
	dma_async_issue_pending(channel);

	return 0;
}
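
/*
 * DMA flow, as wired up above: the scatterlist is mapped and handed to the
 * mem-to-engine channel as a slave transfer, hash_dma_callback() completes
 * device_data->dma.complete when the transfer finishes, and hash_dma_final()
 * waits on that completion before reading out the digest.
 */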

static void hash_dma_done(struct hash_ctx *ctx)
{
	struct dma_chan *chan;

	chan = ctx->device->dma.chan_mem2hash;
	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
		     ctx->device->dma.sg_len, DMA_TO_DEVICE);
}

static int hash_dma_write(struct hash_ctx *ctx,
			  struct scatterlist *sg, int len)
{
	int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
	if (error) {
		dev_dbg(ctx->device->dev, "[%s]: hash_set_dma_transfer() "
			"failed", __func__);
		return error;
	}

	return len;
}

/**
 * get_empty_message_digest - Returns a pre-calculated digest for
 * the empty message.
 * @device_data: Structure for the hash device.
 * @zero_hash: Buffer to return the empty message digest.
 * @zero_hash_size: Hash size of the empty message digest.
 * @zero_digest: True if zero_digest returned.
 */
static int get_empty_message_digest(
		struct hash_device_data *device_data,
		u8 *zero_hash, u32 *zero_hash_size, bool *zero_digest)
{
	int ret = 0;
	struct hash_ctx *ctx = device_data->current_ctx;
	*zero_digest = false;

	/*
	 * Caller responsible for ctx != NULL.
	 */

	if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) {
		if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
			memcpy(zero_hash, &zero_message_hash_sha1[0],
			       SHA1_DIGEST_SIZE);
			*zero_hash_size = SHA1_DIGEST_SIZE;
			*zero_digest = true;
		} else if (HASH_ALGO_SHA256 ==
				ctx->config.algorithm) {
			memcpy(zero_hash, &zero_message_hash_sha256[0],
			       SHA256_DIGEST_SIZE);
			*zero_hash_size = SHA256_DIGEST_SIZE;
			*zero_digest = true;
		} else {
			dev_err(device_data->dev, "[%s] "
				"Incorrect algorithm!", __func__);
			ret = -EINVAL;
			goto out;
		}
	} else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) {
		if (!ctx->keylen) {
			if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
				memcpy(zero_hash, &zero_message_hmac_sha1[0],
				       SHA1_DIGEST_SIZE);
				*zero_hash_size = SHA1_DIGEST_SIZE;
				*zero_digest = true;
			} else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
				memcpy(zero_hash, &zero_message_hmac_sha256[0],
				       SHA256_DIGEST_SIZE);
				*zero_hash_size = SHA256_DIGEST_SIZE;
				*zero_digest = true;
			} else {
				dev_err(device_data->dev, "[%s] "
					"Incorrect algorithm!", __func__);
				ret = -EINVAL;
				goto out;
			}
		} else {
			dev_dbg(device_data->dev, "[%s] Continue hash "
				"calculation, since HMAC key is available",
				__func__);
		}
	}
out:
	return ret;
}

/**
 * hash_disable_power - Request to disable power and clock.
 * @device_data: Structure for the hash device.
 * @save_device_state: If true, saves the current hw state.
 *
 * This function requests that power (regulator) and clock be disabled,
 * and can also save the current hw state.
 */
static int hash_disable_power(
		struct hash_device_data *device_data,
		bool save_device_state)
{
	int ret = 0;
	struct device *dev = device_data->dev;

	spin_lock(&device_data->power_state_lock);
	if (!device_data->power_state)
		goto out;

	if (save_device_state) {
		hash_save_state(device_data,
				&device_data->state);
		device_data->restore_dev_state = true;
	}

	clk_disable(device_data->clk);
	ret = regulator_disable(device_data->regulator);
	if (ret)
		dev_err(dev, "[%s] regulator_disable() failed!", __func__);

	device_data->power_state = false;

out:
	spin_unlock(&device_data->power_state_lock);

	return ret;
}

/**
 * hash_enable_power - Request to enable power and clock.
 * @device_data: Structure for the hash device.
 * @restore_device_state: If true, restores a previously saved hw state.
 *
 * This function requests that power (regulator) and clock be enabled,
 * and can also restore a previously saved hw state.
 */
static int hash_enable_power(
		struct hash_device_data *device_data,
		bool restore_device_state)
{
	int ret = 0;
	struct device *dev = device_data->dev;

	spin_lock(&device_data->power_state_lock);
	if (!device_data->power_state) {
		ret = regulator_enable(device_data->regulator);
		if (ret) {
			dev_err(dev, "[%s]: regulator_enable() failed!",
				__func__);
			goto out;
		}
		ret = clk_enable(device_data->clk);
		if (ret) {
			dev_err(dev, "[%s]: clk_enable() failed!",
				__func__);
			ret = regulator_disable(
					device_data->regulator);
			goto out;
		}
		device_data->power_state = true;
	}

	if (device_data->restore_dev_state) {
		if (restore_device_state) {
			device_data->restore_dev_state = false;
			hash_resume_state(device_data,
					  &device_data->state);
		}
	}

out:
	spin_unlock(&device_data->power_state_lock);

	return ret;
}

/**
 * hash_get_device_data - Checks for an available hash device and returns it.
 * @ctx: Structure for the hash context.
 * @device_data: Structure for the hash device.
 *
 * This function checks for an available hash device and returns it to
 * the caller.
 * Note! The caller needs to release the device by calling up().
 */
static int hash_get_device_data(struct hash_ctx *ctx,
				struct hash_device_data **device_data)
{
	int ret;
	struct klist_iter device_iterator;
	struct klist_node *device_node;
	struct hash_device_data *local_device_data = NULL;

	/* Wait until a device is available */
	ret = down_interruptible(&driver_data.device_allocation);
	if (ret)
		return ret;	/* Interrupted */

	/* Select a device */
	klist_iter_init(&driver_data.device_list, &device_iterator);
	device_node = klist_next(&device_iterator);
	while (device_node) {
		local_device_data = container_of(device_node,
				struct hash_device_data, list_node);
		spin_lock(&local_device_data->ctx_lock);
		/* current_ctx allocates a device, NULL = unallocated */
		if (local_device_data->current_ctx) {
			device_node = klist_next(&device_iterator);
		} else {
			local_device_data->current_ctx = ctx;
			ctx->device = local_device_data;
			spin_unlock(&local_device_data->ctx_lock);
			break;
		}
		spin_unlock(&local_device_data->ctx_lock);
	}
	klist_iter_exit(&device_iterator);

	if (!device_node) {
		/*
		 * No free device found.
		 * Since we allocated a device with down_interruptible, this
		 * should not be able to happen.
		 * Number of available devices, which are contained in
		 * device_allocation, is therefore decremented by not doing
		 * an up(device_allocation).
		 */
		return -EBUSY;
	}

	*device_data = local_device_data;

	return 0;
}

/**
 * hash_hw_write_key - Writes the key to the hardware registers.
 *
 * @device_data: Structure for the hash device.
 * @key: Key to be written.
 * @keylen: The length of the key.
 *
 * Note! This function DOES NOT write to the NBLW register, even though
 * specified in the hw design spec. Either due to incorrect info in the
 * spec or due to a bug in the hw.
 */
static void hash_hw_write_key(struct hash_device_data *device_data,
			      const u8 *key, unsigned int keylen)
{
	u32 word = 0;
	int nwords = 1;

	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

	while (keylen >= 4) {
		u32 *key_word = (u32 *)key;

		HASH_SET_DIN(key_word, nwords);
		keylen -= 4;
		key += 4;
	}

	/* Take care of the remaining bytes in the last word */
	if (keylen) {
		word = 0;
		while (keylen) {
			word |= (key[keylen - 1] << (8 * (keylen - 1)));
			keylen--;
		}

		HASH_SET_DIN(&word, nwords);
	}

	while (device_data->base->str & HASH_STR_DCAL_MASK)
		cpu_relax();

	HASH_SET_DCAL;

	while (device_data->base->str & HASH_STR_DCAL_MASK)
		cpu_relax();
}
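
/*
 * Note on the remainder loop above: the trailing (keylen % 4) key bytes are
 * packed into one little-endian word, remainder byte i landing at bit
 * position 8 * i, before the word is written to HASH_DIN.
 */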

/**
 * init_hash_hw - Initialise the hash hardware for a new calculation.
 * @device_data: Structure for the hash device.
 * @ctx: The hash context.
 *
 * This function will enable the bits needed to clear and start a new
 * calculation.
 */
static int init_hash_hw(struct hash_device_data *device_data,
			struct hash_ctx *ctx)
{
	int ret = 0;

	ret = hash_setconfiguration(device_data, &ctx->config);
	if (ret) {
		dev_err(device_data->dev, "[%s] hash_setconfiguration() "
			"failed!", __func__);
		return ret;
	}

	hash_begin(device_data, ctx);

	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
		hash_hw_write_key(device_data, ctx->key, ctx->keylen);

	return ret;
}

/**
 * hash_get_nents - Return number of entries (nents) in scatterlist (sg).
 *
 * @sg: Scatterlist.
 * @size: Size in bytes.
 * @aligned: True if sg data aligned to work in DMA mode.
 */
static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned)
{
	int nents = 0;
	bool aligned_data = true;

	while (size > 0 && sg) {
		nents++;
		size -= sg->length;

		/* hash_set_dma_transfer will align last nent */
		if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE))
		    || (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) &&
			size > 0))
			aligned_data = false;

		sg = sg_next(sg);
	}

	if (aligned)
		*aligned = aligned_data;

	if (size != 0)
		return -EFAULT;

	return nents;
}

/**
 * hash_dma_valid_data - checks for dma valid sg data.
 * @sg: Scatterlist.
 * @datasize: Datasize in bytes.
 *
 * NOTE! This function checks for dma valid sg data, since DMA
 * only accepts data sizes that are a whole number of words.
 */
static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
{
	bool aligned;

	/* Need to include at least one nent, else error */
	if (hash_get_nents(sg, datasize, &aligned) < 1)
		return false;

	return aligned;
}

/**
 * hash_init - Common hash init function for SHA1/SHA2 (SHA256).
 * @req: The hash request for the job.
 *
 * Initialize structures.
 */
static int hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);

	if (!ctx->key)
		ctx->keylen = 0;

	memset(&req_ctx->state, 0, sizeof(struct hash_state));
	req_ctx->updated = 0;
	if (hash_mode == HASH_MODE_DMA) {
		if (req->nbytes < HASH_DMA_ALIGN_SIZE) {
			req_ctx->dma_mode = false; /* Don't use DMA */

			pr_debug(DEV_DBG_NAME " [%s] DMA mode, but direct "
				 "to CPU mode for data size < %d",
				 __func__, HASH_DMA_ALIGN_SIZE);
		} else {
			if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE &&
			    hash_dma_valid_data(req->src,
						req->nbytes)) {
				req_ctx->dma_mode = true;
			} else {
				req_ctx->dma_mode = false;
				pr_debug(DEV_DBG_NAME " [%s] DMA mode, but use"
					 " CPU mode for datalength < %d"
					 " or non-aligned data, except "
					 "in last nent", __func__,
					 HASH_DMA_PERFORMANCE_MIN_SIZE);
			}
		}
	}

	return 0;
}
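
/*
 * Mode selection above: even with hash_mode == HASH_MODE_DMA, a request
 * falls back to CPU mode when the data is shorter than HASH_DMA_ALIGN_SIZE
 * or HASH_DMA_PERFORMANCE_MIN_SIZE, or when it is not word-aligned (the
 * last nent excepted, since hash_set_dma_transfer() aligns it up).
 */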

/**
 * hash_processblock - This function processes a single block of 512 bits
 * (64 bytes), word aligned, starting at message.
 * @device_data: Structure for the hash device.
 * @message: Block (512 bits) of message to be written to
 * the HASH hardware.
 * @length: Block length in bytes (a multiple of HASH_BYTES_PER_WORD).
 */
static void hash_processblock(
		struct hash_device_data *device_data,
		const u32 *message, int length)
{
	int len = length / HASH_BYTES_PER_WORD;
	/*
	 * NBLW bits. Reset the number of bits in last word (NBLW).
	 */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

	/*
	 * Write message data to the HASH_DIN register.
	 */
	HASH_SET_DIN(message, len);
}

/**
 * hash_messagepad - Pads a message and writes the NBLW bits.
 * @device_data: Structure for the hash device.
 * @message: Last word of a message.
 * @index_bytes: The number of bytes in the last message.
 *
 * This function manages the final part of the digest calculation, when less
 * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
 */
static void hash_messagepad(struct hash_device_data *device_data,
			    const u32 *message, u8 index_bytes)
{
	int nwords = 1;

	/*
	 * Clear hash str register, only clear NBLW
	 * since DCAL will be reset by hardware.
	 */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

	/* Main loop */
	while (index_bytes >= 4) {
		HASH_SET_DIN(message, nwords);
		index_bytes -= 4;
		message++;
	}

	if (index_bytes)
		HASH_SET_DIN(message, nwords);

	while (device_data->base->str & HASH_STR_DCAL_MASK)
		cpu_relax();

	/* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */
	HASH_SET_NBLW(index_bytes * 8);
	dev_dbg(device_data->dev, "[%s] DIN=0x%08x NBLW=%d", __func__,
		readl_relaxed(&device_data->base->din),
		(int)(readl_relaxed(&device_data->base->str) &
		      HASH_STR_NBLW_MASK));
	HASH_SET_DCAL;
	dev_dbg(device_data->dev, "[%s] after dcal -> DIN=0x%08x NBLW=%d",
		__func__, readl_relaxed(&device_data->base->din),
		(int)(readl_relaxed(&device_data->base->str) &
		      HASH_STR_NBLW_MASK));

	while (device_data->base->str & HASH_STR_DCAL_MASK)
		cpu_relax();
}

/**
 * hash_incrementlength - Increments the length of the current message.
 * @ctx: Hash context
 * @incr: Length of message processed already
 *
 * Overflow cannot occur, because conditions for overflow are checked in
 * hash_hw_update.
 */
static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr)
{
	ctx->state.length.low_word += incr;

	/* Check for wrap-around */
	if (ctx->state.length.low_word < incr)
		ctx->state.length.high_word++;
}
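
/*
 * Example of the carry above: with low_word == 0xfffffff8 and incr == 0x10,
 * low_word wraps to 0x8 (< incr), so high_word is incremented and the
 * 64-bit message length spread over the two 32-bit words stays correct.
 */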

/**
 * hash_setconfiguration - Sets the required configuration for the hash
 * hardware.
 * @device_data: Structure for the hash device.
 * @config: Pointer to a configuration structure.
 */
int hash_setconfiguration(struct hash_device_data *device_data,
			  struct hash_config *config)
{
	int ret = 0;

	if (config->algorithm != HASH_ALGO_SHA1 &&
	    config->algorithm != HASH_ALGO_SHA256)
		return -EPERM;

	/*
	 * DATAFORM bits. Set the DATAFORM bits to 0b11, which means the data
	 * to be written to HASH_DIN is considered as 32 bits.
	 */
	HASH_SET_DATA_FORMAT(config->data_format);

	/*
	 * ALGO bit. Set to 0b1 for SHA-1 and 0b0 for SHA-256
	 */
	switch (config->algorithm) {
	case HASH_ALGO_SHA1:
		HASH_SET_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
		break;

	case HASH_ALGO_SHA256:
		HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
		break;

	default:
		dev_err(device_data->dev, "[%s] Incorrect algorithm.",
			__func__);
		return -EPERM;
	}

	/*
	 * MODE bit. This bit selects between HASH or HMAC mode for the
	 * selected algorithm. 0b0 = HASH and 0b1 = HMAC.
	 */
	if (HASH_OPER_MODE_HASH == config->oper_mode)
		HASH_CLEAR_BITS(&device_data->base->cr,
				HASH_CR_MODE_MASK);
	else if (HASH_OPER_MODE_HMAC == config->oper_mode) {
		HASH_SET_BITS(&device_data->base->cr,
			      HASH_CR_MODE_MASK);
		if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) {
			/* Truncate key to blocksize */
			dev_dbg(device_data->dev, "[%s] LKEY set", __func__);
			HASH_SET_BITS(&device_data->base->cr,
				      HASH_CR_LKEY_MASK);
		} else {
			dev_dbg(device_data->dev, "[%s] LKEY cleared",
				__func__);
			HASH_CLEAR_BITS(&device_data->base->cr,
					HASH_CR_LKEY_MASK);
		}
	} else {	/* Wrong hash mode */
		ret = -EPERM;
		dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
			__func__);
	}
	return ret;
}

/**
 * hash_begin - This routine resets some globals and initializes the hash
 * hardware.
 * @device_data: Structure for the hash device.
 * @ctx: Hash context.
 */
void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
{
	/* HW and SW initializations */
	/* Note: there is no need to initialize buffer and digest members */

	while (device_data->base->str & HASH_STR_DCAL_MASK)
		cpu_relax();

	/*
	 * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
	 * prepare and initialize the HASH accelerator to compute the message
	 * digest of a new message.
	 */
	HASH_INITIALIZE;

	/*
	 * NBLW bits. Reset the number of bits in last word (NBLW).
	 */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
}

/**
 * hash_process_data - Processes the data of a hash request, buffering
 * partial blocks and pushing full 512-bit blocks to the hardware.
 * @device_data: Structure for the hash device.
 * @ctx: Hash context.
 * @req_ctx: Request context, holding the local block buffer.
 * @msg_length: Length of the incoming data chunk.
 * @data_buffer: Pointer to the incoming data chunk.
 * @buffer: The request's local block buffer.
 * @index: Number of bytes currently held in the local buffer.
 */
int hash_process_data(
		struct hash_device_data *device_data,
		struct hash_ctx *ctx, struct hash_req_ctx *req_ctx,
		int msg_length, u8 *data_buffer, u8 *buffer, u8 *index)
{
	int ret = 0;
	u32 count;

	do {
		if ((*index + msg_length) < HASH_BLOCK_SIZE) {
			for (count = 0; count < msg_length; count++) {
				buffer[*index + count] =
					*(data_buffer + count);
			}
			*index += msg_length;
			msg_length = 0;
		} else {
			if (req_ctx->updated) {
				ret = hash_resume_state(device_data,
						&device_data->state);
				memmove(req_ctx->state.buffer,
					device_data->state.buffer,
					HASH_BLOCK_SIZE / sizeof(u32));
				if (ret) {
					dev_err(device_data->dev, "[%s] "
						"hash_resume_state()"
						" failed!", __func__);
					goto out;
				}
			} else {
				ret = init_hash_hw(device_data, ctx);
				if (ret) {
					dev_err(device_data->dev, "[%s] "
						"init_hash_hw()"
						" failed!", __func__);
					goto out;
				}
				req_ctx->updated = 1;
			}
			/*
			 * If 'data_buffer' is four byte aligned and
			 * local buffer does not have any data, we can
			 * write data directly from 'data_buffer' to
			 * HW peripheral, otherwise we first copy data
			 * to a local buffer
			 */
			if ((0 == (((u32)data_buffer) % 4))
			    && (0 == *index))
				hash_processblock(device_data,
						  (const u32 *)data_buffer,
						  HASH_BLOCK_SIZE);
			else {
				for (count = 0;
				     count < (u32)(HASH_BLOCK_SIZE - *index);
				     count++) {
					buffer[*index + count] =
						*(data_buffer + count);
				}
				hash_processblock(device_data,
						  (const u32 *)buffer,
						  HASH_BLOCK_SIZE);
			}
			hash_incrementlength(req_ctx, HASH_BLOCK_SIZE);
			data_buffer += (HASH_BLOCK_SIZE - *index);

			msg_length -= (HASH_BLOCK_SIZE - *index);
			*index = 0;

			ret = hash_save_state(device_data,
					&device_data->state);

			memmove(device_data->state.buffer,
				req_ctx->state.buffer,
				HASH_BLOCK_SIZE / sizeof(u32));
			if (ret) {
				dev_err(device_data->dev, "[%s] "
					"hash_save_state()"
					" failed!", __func__);
				goto out;
			}
		}
	} while (msg_length != 0);
out:

	return ret;
}
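
/*
 * Buffering invariant for hash_process_data(): *index counts the bytes held
 * in the request's local block buffer and stays below HASH_BLOCK_SIZE on
 * entry and on exit; a block is only pushed to the hardware once buffered
 * plus incoming data reach a full HASH_BLOCK_SIZE.
 */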

/**
 * hash_dma_final - The hash dma final function for SHA1/SHA256.
 * @req: The hash request for the job.
 */
static int hash_dma_final(struct ahash_request *req)
{
	int ret = 0;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct hash_device_data *device_data;
	u8 digest[SHA256_DIGEST_SIZE];
	int bytes_written = 0;

	ret = hash_get_device_data(ctx, &device_data);
	if (ret)
		return ret;

	dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx);

	if (req_ctx->updated) {
		ret = hash_resume_state(device_data, &device_data->state);

		if (ret) {
			dev_err(device_data->dev, "[%s] hash_resume_state() "
				"failed!", __func__);
			goto out;
		}
	}

	if (!req_ctx->updated) {
		ret = hash_setconfiguration(device_data, &ctx->config);
		if (ret) {
			dev_err(device_data->dev, "[%s] "
				"hash_setconfiguration() failed!",
				__func__);
			goto out;
		}

		/* Enable DMA input */
		if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode) {
			HASH_CLEAR_BITS(&device_data->base->cr,
					HASH_CR_DMAE_MASK);
		} else {
			HASH_SET_BITS(&device_data->base->cr,
				      HASH_CR_DMAE_MASK);
			HASH_SET_BITS(&device_data->base->cr,
				      HASH_CR_PRIVN_MASK);
		}

		HASH_INITIALIZE;

		if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
			hash_hw_write_key(device_data, ctx->key, ctx->keylen);

		/* Number of bits in last word = (nbytes * 8) % 32 */
		HASH_SET_NBLW((req->nbytes * 8) % 32);
		req_ctx->updated = 1;
	}

	/* Store the nents in the dma struct. */
	ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL);
	if (!ctx->device->dma.nents) {
		dev_err(device_data->dev, "[%s] "
			"ctx->device->dma.nents = 0", __func__);
		ret = ctx->device->dma.nents;
		goto out;
	}

	bytes_written = hash_dma_write(ctx, req->src, req->nbytes);
	if (bytes_written != req->nbytes) {
		dev_err(device_data->dev, "[%s] "
			"hash_dma_write() failed!", __func__);
		ret = bytes_written;
		goto out;
	}

	wait_for_completion(&ctx->device->dma.complete);
	hash_dma_done(ctx);

	while (device_data->base->str & HASH_STR_DCAL_MASK)
		cpu_relax();

	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
		unsigned int keylen = ctx->keylen;
		u8 *key = ctx->key;

		dev_dbg(device_data->dev, "[%s] keylen: %d", __func__,
			ctx->keylen);
		hash_hw_write_key(device_data, key, keylen);
	}

	hash_get_digest(device_data, digest, ctx->config.algorithm);
	memcpy(req->result, digest, ctx->digestsize);

out:
	release_hash_device(device_data);

	/*
	 * Allocated in setkey, and only used in HMAC.
	 */
	kfree(ctx->key);

	return ret;
}

/**
 * hash_hw_final - The final hash calculation function.
 * @req: The hash request for the job.
 */
int hash_hw_final(struct ahash_request *req)
{
	int ret = 0;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct hash_device_data *device_data;
	u8 digest[SHA256_DIGEST_SIZE];

	ret = hash_get_device_data(ctx, &device_data);
	if (ret)
		return ret;

	dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx);

	if (req_ctx->updated) {
		ret = hash_resume_state(device_data, &device_data->state);

		if (ret) {
			dev_err(device_data->dev, "[%s] hash_resume_state() "
				"failed!", __func__);
			goto out;
		}
	} else if (req->nbytes == 0 && ctx->keylen == 0) {
		u8 zero_hash[SHA256_DIGEST_SIZE];
		u32 zero_hash_size = 0;
		bool zero_digest = false;
		/*
		 * Use a pre-calculated empty message digest
		 * (workaround since hw return zeroes, hw bug!?)
		 */
		ret = get_empty_message_digest(device_data, &zero_hash[0],
					       &zero_hash_size, &zero_digest);
		if (!ret && likely(zero_hash_size == ctx->digestsize) &&
		    zero_digest) {
			memcpy(req->result, &zero_hash[0], ctx->digestsize);
			goto out;
		} else if (!ret && !zero_digest) {
			dev_dbg(device_data->dev, "[%s] HMAC zero msg with "
				"key, continue...", __func__);
		} else {
			dev_err(device_data->dev, "[%s] ret=%d, or wrong "
				"digest size? %s", __func__, ret,
				(zero_hash_size == ctx->digestsize) ?
				"true" : "false");
			/* Return error */
			goto out;
		}
	} else if (req->nbytes == 0 && ctx->keylen > 0) {
		ret = -EPERM;
		dev_err(device_data->dev, "[%s] Empty message with "
			"keylength > 0, NOT supported.", __func__);
		goto out;
	}

	if (!req_ctx->updated) {
		ret = init_hash_hw(device_data, ctx);
		if (ret) {
			dev_err(device_data->dev, "[%s] init_hash_hw() "
				"failed!", __func__);
			goto out;
		}
	}

	if (req_ctx->state.index) {
		hash_messagepad(device_data, req_ctx->state.buffer,
				req_ctx->state.index);
	} else {
		HASH_SET_DCAL;
		while (device_data->base->str & HASH_STR_DCAL_MASK)
			cpu_relax();
	}

	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
		unsigned int keylen = ctx->keylen;
		u8 *key = ctx->key;

		dev_dbg(device_data->dev, "[%s] keylen: %d", __func__,
			ctx->keylen);
		hash_hw_write_key(device_data, key, keylen);
	}

	hash_get_digest(device_data, digest, ctx->config.algorithm);
	memcpy(req->result, digest, ctx->digestsize);

out:
	release_hash_device(device_data);

	/*
	 * Allocated in setkey, and only used in HMAC.
	 */
	kfree(ctx->key);

	return ret;
}

/**
 * hash_hw_update - Updates current HASH computation hashing another part of
 * the message.
 * @req: Byte array containing the message to be hashed (caller
 * allocated).
 */
int hash_hw_update(struct ahash_request *req)
{
	int ret = 0;
	u8 index = 0;
	u8 *buffer;
	struct hash_device_data *device_data;
	u8 *data_buffer;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_hash_walk walk;
	int msg_length = crypto_hash_walk_first(req, &walk);

	/* Empty message ("") is correct indata */
	if (msg_length == 0)
		return ret;

	index = req_ctx->state.index;
	buffer = (u8 *)req_ctx->state.buffer;

	/* Check if ctx->state.length + msg_length overflows */
	if (msg_length > (req_ctx->state.length.low_word + msg_length) &&
	    HASH_HIGH_WORD_MAX_VAL ==
	    req_ctx->state.length.high_word) {
		pr_err(DEV_DBG_NAME " [%s] HASH_MSG_LENGTH_OVERFLOW!",
		       __func__);
		return -EPERM;
	}

	ret = hash_get_device_data(ctx, &device_data);
	if (ret)
		return ret;

	/* Main loop */
	while (0 != msg_length) {
		data_buffer = walk.data;
		ret = hash_process_data(device_data, ctx, req_ctx, msg_length,
					data_buffer, buffer, &index);

		if (ret) {
			dev_err(device_data->dev, "[%s] hash_process_data() "
				"failed!", __func__);
			goto out;
		}

		msg_length = crypto_hash_walk_done(&walk, 0);
	}

	req_ctx->state.index = index;
	dev_dbg(device_data->dev, "[%s] indata length=%d, bin=%d",
		__func__, req_ctx->state.index,
		req_ctx->state.bit_index);

out:
	release_hash_device(device_data);

	return ret;
}

/**
 * hash_resume_state - Function that resumes the state of a calculation.
 * @device_data: Pointer to the device structure.
 * @device_state: The state to be restored in the hash hardware.
 */
int hash_resume_state(struct hash_device_data *device_data,
		      const struct hash_state *device_state)
{
	u32 temp_cr;
	s32 count;
	int hash_mode = HASH_OPER_MODE_HASH;

	if (NULL == device_state) {
		dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
			__func__);
		return -EPERM;
	}

	/* Check correctness of index and length members */
	if (device_state->index > HASH_BLOCK_SIZE
	    || (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) {
		dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
			__func__);
		return -EPERM;
	}

	/*
	 * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
	 * prepare and initialize the HASH accelerator to compute the message
	 * digest of a new message.
	 */
	HASH_INITIALIZE;

	temp_cr = device_state->temp_cr;
	writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr);

	if (device_data->base->cr & HASH_CR_MODE_MASK)
		hash_mode = HASH_OPER_MODE_HMAC;
	else
		hash_mode = HASH_OPER_MODE_HASH;

	for (count = 0; count < HASH_CSR_COUNT; count++) {
		if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
			break;

		writel_relaxed(device_state->csr[count],
			       &device_data->base->csrx[count]);
	}

	writel_relaxed(device_state->csfull, &device_data->base->csfull);
	writel_relaxed(device_state->csdatain, &device_data->base->csdatain);

	writel_relaxed(device_state->str_reg, &device_data->base->str);
	writel_relaxed(temp_cr, &device_data->base->cr);

	return 0;
}
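
/*
 * On the count >= 36 cutoff: judging from this pairing of
 * hash_resume_state() and hash_save_state(), context-switch registers 36
 * and up only hold live state in HMAC mode, so plain-HASH save/restore
 * stops early.
 */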

/**
 * hash_save_state - Function that saves the state of hardware.
 * @device_data: Pointer to the device structure.
 * @device_state: The structure where the hardware state should be saved.
 */
int hash_save_state(struct hash_device_data *device_data,
		    struct hash_state *device_state)
{
	u32 temp_cr;
	u32 count;
	int hash_mode = HASH_OPER_MODE_HASH;

	if (NULL == device_state) {
		dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
			__func__);
		return -ENOTSUPP;
	}

	/*
	 * Write dummy value to force digest intermediate calculation. This
	 * actually makes sure that there isn't any ongoing calculation in the
	 * hardware.
	 */
	while (device_data->base->str & HASH_STR_DCAL_MASK)
		cpu_relax();

	temp_cr = readl_relaxed(&device_data->base->cr);

	device_state->str_reg = readl_relaxed(&device_data->base->str);

	device_state->din_reg = readl_relaxed(&device_data->base->din);

	if (device_data->base->cr & HASH_CR_MODE_MASK)
		hash_mode = HASH_OPER_MODE_HMAC;
	else
		hash_mode = HASH_OPER_MODE_HASH;

	for (count = 0; count < HASH_CSR_COUNT; count++) {
		if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
			break;

		device_state->csr[count] =
			readl_relaxed(&device_data->base->csrx[count]);
	}

	device_state->csfull = readl_relaxed(&device_data->base->csfull);
	device_state->csdatain = readl_relaxed(&device_data->base->csdatain);

	device_state->temp_cr = temp_cr;

	return 0;
}

/**
 * hash_check_hw - This routine checks for peripheral Ids and PCell Ids.
 * @device_data: Structure for the hash device.
 */
int hash_check_hw(struct hash_device_data *device_data)
{
	/* Checking Peripheral Ids */
	if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0)
	    && HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1)
	    && HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2)
	    && HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3)
	    && HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0)
	    && HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1)
	    && HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2)
	    && HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)) {
		return 0;
	}

	dev_err(device_data->dev, "[%s] HASH_UNSUPPORTED_HW!", __func__);
	return -ENOTSUPP;
}

/**
 * hash_get_digest - Gets the digest.
 * @device_data: Pointer to the device structure.
 * @digest: User allocated byte array for the calculated digest.
 * @algorithm: The algorithm in use.
 */
void hash_get_digest(struct hash_device_data *device_data,
		     u8 *digest, int algorithm)
{
	u32 temp_hx_val, count;
	int loop_ctr;

	if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) {
		dev_err(device_data->dev, "[%s] Incorrect algorithm %d",
			__func__, algorithm);
		return;
	}

	if (algorithm == HASH_ALGO_SHA1)
		loop_ctr = SHA1_DIGEST_SIZE / sizeof(u32);
	else
		loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32);

	dev_dbg(device_data->dev, "[%s] digest array:(0x%x)",
		__func__, (u32) digest);

	/* Copy result into digest array */
	for (count = 0; count < loop_ctr; count++) {
		temp_hx_val = readl_relaxed(&device_data->base->hx[count]);
		digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF);
		digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF);
		digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF);
		digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF);
	}
}
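
/*
 * The byte shuffling above serializes each 32-bit HX register most
 * significant byte first, i.e. the big-endian word order in which SHA-1
 * and SHA-256 digests are conventionally presented.
 */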

/**
 * ahash_update - The hash update function for SHA1/SHA2 (SHA256).
 * @req: The hash request for the job.
 */
static int ahash_update(struct ahash_request *req)
{
	int ret = 0;
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);

	if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode)
		ret = hash_hw_update(req);
	/* Skip update for DMA, all data will be passed to DMA in final */

	if (ret) {
		pr_err(DEV_DBG_NAME " [%s] hash_hw_update() failed!",
		       __func__);
	}

	return ret;
}

/**
 * ahash_final - The hash final function for SHA1/SHA2 (SHA256).
 * @req: The hash request for the job.
 */
static int ahash_final(struct ahash_request *req)
{
	int ret = 0;
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);

	pr_debug(DEV_DBG_NAME " [%s] data size: %d", __func__, req->nbytes);

	if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode)
		ret = hash_dma_final(req);
	else
		ret = hash_hw_final(req);

	if (ret) {
		pr_err(DEV_DBG_NAME " [%s] hash_hw/dma_final() failed",
		       __func__);
	}

	return ret;
}

static int hash_setkey(struct crypto_ahash *tfm,
		       const u8 *key, unsigned int keylen, int alg)
{
	int ret = 0;
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	/*
	 * Freed in final.
	 */
	ctx->key = kmemdup(key, keylen, GFP_KERNEL);
	if (!ctx->key) {
		pr_err(DEV_DBG_NAME " [%s] Failed to allocate ctx->key "
		       "for %d\n", __func__, alg);
		return -ENOMEM;
	}
	ctx->keylen = keylen;

	return ret;
}

static int ahash_sha1_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = HASH_ALGO_SHA1;
	ctx->config.oper_mode = HASH_OPER_MODE_HASH;
	ctx->digestsize = SHA1_DIGEST_SIZE;

	return hash_init(req);
}

static int ahash_sha256_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = HASH_ALGO_SHA256;
	ctx->config.oper_mode = HASH_OPER_MODE_HASH;
	ctx->digestsize = SHA256_DIGEST_SIZE;

	return hash_init(req);
}

static int ahash_sha1_digest(struct ahash_request *req)
{
	int ret2, ret1;

	ret1 = ahash_sha1_init(req);
	if (ret1)
		goto out;

	ret1 = ahash_update(req);
	ret2 = ahash_final(req);

out:
	return ret1 ? ret1 : ret2;
}

static int ahash_sha256_digest(struct ahash_request *req)
{
	int ret2, ret1;

	ret1 = ahash_sha256_init(req);
	if (ret1)
		goto out;

	ret1 = ahash_update(req);
	ret2 = ahash_final(req);

out:
	return ret1 ? ret1 : ret2;
}

static int hmac_sha1_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = HASH_ALGO_SHA1;
	ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
	ctx->digestsize = SHA1_DIGEST_SIZE;

	return hash_init(req);
}

static int hmac_sha256_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = HASH_ALGO_SHA256;
	ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
	ctx->digestsize = SHA256_DIGEST_SIZE;

	return hash_init(req);
}

static int hmac_sha1_digest(struct ahash_request *req)
{
	int ret2, ret1;

	ret1 = hmac_sha1_init(req);
	if (ret1)
		goto out;

	ret1 = ahash_update(req);
	ret2 = ahash_final(req);

out:
	return ret1 ? ret1 : ret2;
}

static int hmac_sha256_digest(struct ahash_request *req)
{
	int ret2, ret1;

	ret1 = hmac_sha256_init(req);
	if (ret1)
		goto out;

	ret1 = ahash_update(req);
	ret2 = ahash_final(req);

out:
	return ret1 ? ret1 : ret2;
}

static int hmac_sha1_setkey(struct crypto_ahash *tfm,
			    const u8 *key, unsigned int keylen)
{
	return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1);
}

static int hmac_sha256_setkey(struct crypto_ahash *tfm,
			      const u8 *key, unsigned int keylen)
{
	return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256);
}
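
/*
 * A minimal consumer-side sketch (not part of this driver) of how the
 * algorithms registered below can be driven through the kernel's generic
 * ahash API. Allocating by the driver name "sha1-ux500" pins this
 * implementation; error handling is abbreviated.
 */
#if 0
static int example_sha1_digest(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int ret;

	tfm = crypto_alloc_ahash("sha1-ux500", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_crypt(req, &sg, out, len);

	/*
	 * For an async tfm this may return -EINPROGRESS; a real caller
	 * registers a completion callback via ahash_request_set_callback().
	 */
	ret = crypto_ahash_digest(req);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}
#endif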

struct hash_algo_template {
	struct hash_config conf;
	struct ahash_alg hash;
};

static int hash_cra_init(struct crypto_tfm *tfm)
{
	struct hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct hash_algo_template *hash_alg;

	hash_alg = container_of(__crypto_ahash_alg(alg),
				struct hash_algo_template,
				hash);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct hash_req_ctx));

	ctx->config.data_format = HASH_DATA_8_BITS;
	ctx->config.algorithm = hash_alg->conf.algorithm;
	ctx->config.oper_mode = hash_alg->conf.oper_mode;

	ctx->digestsize = hash_alg->hash.halg.digestsize;

	return 0;
}

static struct hash_algo_template hash_algs[] = {
	{
		.conf.algorithm = HASH_ALGO_SHA1,
		.conf.oper_mode = HASH_OPER_MODE_HASH,
		.hash = {
			.init = hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = ahash_sha1_digest,
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-ux500",
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_type = &crypto_ahash_type,
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.conf.algorithm = HASH_ALGO_SHA256,
		.conf.oper_mode = HASH_OPER_MODE_HASH,
		.hash = {
			.init = hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = ahash_sha256_digest,
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-ux500",
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_type = &crypto_ahash_type,
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.conf.algorithm = HASH_ALGO_SHA1,
		.conf.oper_mode = HASH_OPER_MODE_HMAC,
		.hash = {
			.init = hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = hmac_sha1_digest,
			.setkey = hmac_sha1_setkey,
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-ux500",
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_type = &crypto_ahash_type,
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.conf.algorithm = HASH_ALGO_SHA256,
		.conf.oper_mode = HASH_OPER_MODE_HMAC,
		.hash = {
			.init = hash_init,
			.update = ahash_update,
			.final = ahash_final,
			.digest = hmac_sha256_digest,
			.setkey = hmac_sha256_setkey,
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct hash_ctx),
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-ux500",
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct hash_ctx),
				.cra_type = &crypto_ahash_type,
				.cra_init = hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	}
};
/**
 * ahash_algs_register_all - Register the supported hash algorithms.
 * @device_data: Structure for the hash device.
 *
 * Registers every entry in hash_algs[] with the crypto API and rolls back
 * the entries already registered if a registration fails.
 */
static int ahash_algs_register_all(struct hash_device_data *device_data)
{
	int ret;
	int i;
	int count;

	for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
		ret = crypto_register_ahash(&hash_algs[i].hash);
		if (ret) {
			count = i;
			dev_err(device_data->dev, "[%s] alg registration failed",
				hash_algs[i].hash.halg.base.cra_driver_name);
			goto unreg;
		}
	}
	return 0;
unreg:
	/* Roll back the algorithms that did register. */
	for (i = 0; i < count; i++)
		crypto_unregister_ahash(&hash_algs[i].hash);
	return ret;
}
/**
 * ahash_algs_unregister_all - Unregister the supported hash algorithms.
 * @device_data: Structure for the hash device.
 */
static void ahash_algs_unregister_all(struct hash_device_data *device_data)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
		crypto_unregister_ahash(&hash_algs[i].hash);
}
/**
 * ux500_hash_probe - Function that probes the hash hardware.
 * @pdev: The platform device.
 */
static int ux500_hash_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct resource *res = NULL;
	struct hash_device_data *device_data;
	struct device *dev = &pdev->dev;

	/* Probe runs in process context; no need for an atomic allocation. */
	device_data = kzalloc(sizeof(struct hash_device_data), GFP_KERNEL);
	if (!device_data) {
		dev_dbg(dev, "[%s] kzalloc() failed!", __func__);
		ret = -ENOMEM;
		goto out;
	}
	device_data->dev = dev;
	device_data->current_ctx = NULL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_dbg(dev, "[%s] platform_get_resource() failed!", __func__);
		ret = -ENODEV;
		goto out_kfree;
	}

	res = request_mem_region(res->start, resource_size(res), pdev->name);
	if (res == NULL) {
		dev_dbg(dev, "[%s] request_mem_region() failed!", __func__);
		ret = -EBUSY;
		goto out_kfree;
	}

	device_data->phybase = res->start;
	device_data->base = ioremap(res->start, resource_size(res));
	if (!device_data->base) {
		dev_err(dev, "[%s] ioremap() failed!", __func__);
		ret = -ENOMEM;
		goto out_free_mem;
	}
	spin_lock_init(&device_data->ctx_lock);
	spin_lock_init(&device_data->power_state_lock);

	/* Enable power for HASH1 hardware block */
	device_data->regulator = regulator_get(dev, "v-ape");
	if (IS_ERR(device_data->regulator)) {
		dev_err(dev, "[%s] regulator_get() failed!", __func__);
		ret = PTR_ERR(device_data->regulator);
		device_data->regulator = NULL;
		goto out_unmap;
	}

	/* Enable the clock for HASH1 hardware block */
	device_data->clk = clk_get(dev, NULL);
	if (IS_ERR(device_data->clk)) {
		dev_err(dev, "[%s] clk_get() failed!", __func__);
		ret = PTR_ERR(device_data->clk);
		goto out_regulator;
	}

	ret = clk_prepare(device_data->clk);
	if (ret) {
		dev_err(dev, "[%s] clk_prepare() failed!", __func__);
		goto out_clk;
	}

	/* Enable device power (and clock) */
	ret = hash_enable_power(device_data, false);
	if (ret) {
		dev_err(dev, "[%s]: hash_enable_power() failed!", __func__);
		goto out_clk_unprepare;
	}

	ret = hash_check_hw(device_data);
	if (ret) {
		dev_err(dev, "[%s] hash_check_hw() failed!", __func__);
		goto out_power;
	}

	if (hash_mode == HASH_MODE_DMA)
		hash_dma_setup_channel(device_data, dev);

	platform_set_drvdata(pdev, device_data);

	/* Put the new device into the device list... */
	klist_add_tail(&device_data->list_node, &driver_data.device_list);
	/* ... and signal that a new device is available. */
	up(&driver_data.device_allocation);

	ret = ahash_algs_register_all(device_data);
	if (ret) {
		dev_err(dev, "[%s] ahash_algs_register_all() failed!",
			__func__);
		goto out_power;
	}

	dev_info(dev, "successfully registered\n");

	return 0;

out_power:
	hash_disable_power(device_data, false);

out_clk_unprepare:
	clk_unprepare(device_data->clk);

out_clk:
	clk_put(device_data->clk);

out_regulator:
	regulator_put(device_data->regulator);

out_unmap:
	iounmap(device_data->base);

out_free_mem:
	release_mem_region(res->start, resource_size(res));

out_kfree:
	kfree(device_data);
out:
	return ret;
}
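/*
 * Note: the out_* labels in ux500_hash_probe() above release resources in
 * the reverse order of their acquisition, so each failure point jumps to
 * the label matching the last step that succeeded.
 */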
/**
 * ux500_hash_remove - Function that removes the hash device from the platform.
 * @pdev: The platform device.
 */
static int ux500_hash_remove(struct platform_device *pdev)
{
	struct resource *res;
	struct hash_device_data *device_data;
	struct device *dev = &pdev->dev;

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(dev, "[%s]: platform_get_drvdata() failed!",
			__func__);
		return -ENOMEM;
	}

	/* Try to decrease the number of available devices. */
	if (down_trylock(&driver_data.device_allocation))
		return -EBUSY;

	/* Check that the device is free */
	spin_lock(&device_data->ctx_lock);
	/* current_ctx allocates a device, NULL = unallocated */
	if (device_data->current_ctx) {
		/* The device is busy */
		spin_unlock(&device_data->ctx_lock);
		/* Return the device to the pool. */
		up(&driver_data.device_allocation);
		return -EBUSY;
	}

	spin_unlock(&device_data->ctx_lock);

	/* Remove the device from the list */
	if (klist_node_attached(&device_data->list_node))
		klist_remove(&device_data->list_node);

	/* If this was the last device, remove the services */
	if (list_empty(&driver_data.device_list.k_list))
		ahash_algs_unregister_all(device_data);

	if (hash_disable_power(device_data, false))
		dev_err(dev, "[%s]: hash_disable_power() failed",
			__func__);

	clk_unprepare(device_data->clk);
	clk_put(device_data->clk);
	regulator_put(device_data->regulator);

	iounmap(device_data->base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		release_mem_region(res->start, resource_size(res));

	kfree(device_data);

	return 0;
}
/**
 * ux500_hash_shutdown - Function that shuts down the hash device.
 * @pdev: The platform device.
 */
static void ux500_hash_shutdown(struct platform_device *pdev)
{
	struct resource *res = NULL;
	struct hash_device_data *device_data;

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
			__func__);
		return;
	}

	/* Check that the device is free */
	spin_lock(&device_data->ctx_lock);
	/* current_ctx allocates a device, NULL = unallocated */
	if (!device_data->current_ctx) {
		if (down_trylock(&driver_data.device_allocation))
			dev_dbg(&pdev->dev, "[%s]: Hash still in use! "
				"Shutting down anyway...", __func__);
		/*
		 * (Allocate the device)
		 * current_ctx must be set to a non-NULL (dummy) value so
		 * that the device cannot be handed out to a new context
		 * while it is being shut down.
		 */
		device_data->current_ctx++;
	}
	spin_unlock(&device_data->ctx_lock);

	/* Remove the device from the list */
	if (klist_node_attached(&device_data->list_node))
		klist_remove(&device_data->list_node);

	/* If this was the last device, remove the services */
	if (list_empty(&driver_data.device_list.k_list))
		ahash_algs_unregister_all(device_data);

	iounmap(device_data->base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		release_mem_region(res->start, resource_size(res));

	if (hash_disable_power(device_data, false))
		dev_err(&pdev->dev, "[%s] hash_disable_power() failed",
			__func__);
}
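/*
 * Note on the PM handlers below: an otherwise idle device is marked as
 * taken across a suspend by bumping current_ctx from NULL to a dummy
 * non-NULL pointer, and ux500_hash_resume() recognizes that same dummy
 * value and releases the device again. This is a driver-internal
 * convention, not something required by the crypto API.
 */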
/**
 * ux500_hash_suspend - Function that suspends the hash device.
 * @dev: Device to suspend.
 */
static int ux500_hash_suspend(struct device *dev)
{
	int ret;
	struct hash_device_data *device_data;
	struct hash_ctx *temp_ctx = NULL;

	device_data = dev_get_drvdata(dev);
	if (!device_data) {
		dev_err(dev, "[%s] dev_get_drvdata() failed!", __func__);
		return -ENOMEM;
	}

	spin_lock(&device_data->ctx_lock);
	if (!device_data->current_ctx)
		device_data->current_ctx++;
	spin_unlock(&device_data->ctx_lock);

	if (device_data->current_ctx == ++temp_ctx) {
		/* The device was idle: take it out of the pool as well. */
		if (down_interruptible(&driver_data.device_allocation))
			dev_dbg(dev, "[%s]: down_interruptible() failed",
				__func__);
		ret = hash_disable_power(device_data, false);
	} else {
		/* A real context owns the device: power off, saving state. */
		ret = hash_disable_power(device_data, true);
	}

	if (ret)
		dev_err(dev, "[%s]: hash_disable_power()", __func__);

	return ret;
}
/**
 * ux500_hash_resume - Function that resumes the hash device.
 * @dev: Device to resume.
 */
static int ux500_hash_resume(struct device *dev)
{
	int ret = 0;
	struct hash_device_data *device_data;
	struct hash_ctx *temp_ctx = NULL;

	device_data = dev_get_drvdata(dev);
	if (!device_data) {
		dev_err(dev, "[%s] dev_get_drvdata() failed!", __func__);
		return -ENOMEM;
	}

	spin_lock(&device_data->ctx_lock);
	/* The dummy context means the device was idle when suspended. */
	if (device_data->current_ctx == ++temp_ctx)
		device_data->current_ctx = NULL;
	spin_unlock(&device_data->ctx_lock);

	if (!device_data->current_ctx)
		/* The device was idle: return it to the pool. */
		up(&driver_data.device_allocation);
	else
		/* Restore power and the saved state for the active context. */
		ret = hash_enable_power(device_data, true);

	if (ret)
		dev_err(dev, "[%s]: hash_enable_power() failed!", __func__);

	return ret;
}
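/* Hook the suspend/resume handlers into the standard system sleep PM ops. */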
static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume);
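/*
 * The driver name below must match the name under which the platform
 * device is registered ("hash1") for the platform bus to bind the two.
 */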
static struct platform_driver hash_driver = {
	.probe = ux500_hash_probe,
	.remove = ux500_hash_remove,
	.shutdown = ux500_hash_shutdown,
	.driver = {
		.owner = THIS_MODULE,
		.name = "hash1",
		.pm = &ux500_hash_pm,
	}
};
/**
 * ux500_hash_mod_init - The kernel module init function.
 */
static int __init ux500_hash_mod_init(void)
{
	klist_init(&driver_data.device_list, NULL, NULL);
	/* Initialize the semaphore to 0 devices (locked state) */
	sema_init(&driver_data.device_allocation, 0);

	return platform_driver_register(&hash_driver);
}
/**
 * ux500_hash_mod_fini - The kernel module exit function.
 */
static void __exit ux500_hash_mod_fini(void)
{
	platform_driver_unregister(&hash_driver);
}

module_init(ux500_hash_mod_init);
module_exit(ux500_hash_mod_fini);

MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
MODULE_LICENSE("GPL");

MODULE_ALIAS("sha1-all");
MODULE_ALIAS("sha256-all");
MODULE_ALIAS("hmac-sha1-all");
MODULE_ALIAS("hmac-sha256-all");
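/*
 * Usage sketch (not part of this driver): a kernel client reaches this
 * engine through the generic ahash API rather than by calling the driver
 * directly, along these lines:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha1", 0, 0);
 *
 *	if (!IS_ERR(tfm)) {
 *		... build an ahash_request and submit it with
 *		... crypto_ahash_digest(), then:
 *		crypto_free_ahash(tfm);
 *	}
 *
 * Requesting "sha1" lets the crypto core pick an implementation by
 * priority; requesting the driver name "sha1-ux500" pins this engine.
 */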