remoteproc_core.c 43 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570
  1. /*
  2. * Remote Processor Framework
  3. *
  4. * Copyright (C) 2011 Texas Instruments, Inc.
  5. * Copyright (C) 2011 Google, Inc.
  6. *
  7. * Ohad Ben-Cohen <ohad@wizery.com>
  8. * Brian Swetland <swetland@google.com>
  9. * Mark Grosen <mgrosen@ti.com>
  10. * Fernando Guzman Lugo <fernando.lugo@ti.com>
  11. * Suman Anna <s-anna@ti.com>
  12. * Robert Tivy <rtivy@ti.com>
  13. * Armando Uribe De Leon <x0095078@ti.com>
  14. *
  15. * This program is free software; you can redistribute it and/or
  16. * modify it under the terms of the GNU General Public License
  17. * version 2 as published by the Free Software Foundation.
  18. *
  19. * This program is distributed in the hope that it will be useful,
  20. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  21. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  22. * GNU General Public License for more details.
  23. */
  24. #define pr_fmt(fmt) "%s: " fmt, __func__
  25. #include <linux/kernel.h>
  26. #include <linux/module.h>
  27. #include <linux/device.h>
  28. #include <linux/slab.h>
  29. #include <linux/mutex.h>
  30. #include <linux/dma-mapping.h>
  31. #include <linux/firmware.h>
  32. #include <linux/string.h>
  33. #include <linux/debugfs.h>
  34. #include <linux/remoteproc.h>
  35. #include <linux/iommu.h>
  36. #include <linux/klist.h>
  37. #include <linux/elf.h>
  38. #include <linux/virtio_ids.h>
  39. #include <linux/virtio_ring.h>
  40. #include <asm/byteorder.h>
  41. #include "remoteproc_internal.h"
/* forward declarations for the klist reference callbacks defined below */
static void klist_rproc_get(struct klist_node *n);
static void klist_rproc_put(struct klist_node *n);

/*
 * klist of the available remote processors.
 *
 * We need this in order to support name-based lookups (needed by the
 * rproc_get_by_name()).
 *
 * That said, we don't use rproc_get_by_name() anymore within the rpmsg
 * framework. The use cases that do require its existence should be
 * scrutinized, and hopefully migrated to rproc_boot() using device-based
 * binding.
 *
 * If/when this materializes, we could drop the klist (and the by_name
 * API).
 */
static DEFINE_KLIST(rprocs, klist_rproc_get, klist_rproc_put);

/* handler invoked with the complete resource table of a firmware image */
typedef int (*rproc_handle_resources_t)(struct rproc *rproc,
				struct resource_table *table, int len);
/* handler invoked with a single resource entry of a specific type */
typedef int (*rproc_handle_resource_t)(struct rproc *rproc, void *, int avail);
/*
 * This is the IOMMU fault handler we register with the IOMMU API
 * (when relevant; not all remote processors access memory through
 * an IOMMU).
 *
 * IOMMU core will invoke this handler whenever the remote processor
 * will try to access an unmapped device address.
 *
 * Currently this is mostly a stub, but it will be later used to trigger
 * the recovery of the remote processor.
 */
static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev,
		unsigned long iova, int flags)
{
	/* log the faulting device address so the failure is diagnosable */
	dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags);

	/*
	 * Let the iommu core know we're not really handling this fault;
	 * we just plan to use this as a recovery trigger.
	 */
	return -ENOSYS;
}
  83. static int rproc_enable_iommu(struct rproc *rproc)
  84. {
  85. struct iommu_domain *domain;
  86. struct device *dev = rproc->dev;
  87. int ret;
  88. /*
  89. * We currently use iommu_present() to decide if an IOMMU
  90. * setup is needed.
  91. *
  92. * This works for simple cases, but will easily fail with
  93. * platforms that do have an IOMMU, but not for this specific
  94. * rproc.
  95. *
  96. * This will be easily solved by introducing hw capabilities
  97. * that will be set by the remoteproc driver.
  98. */
  99. if (!iommu_present(dev->bus)) {
  100. dev_dbg(dev, "iommu not found\n");
  101. return 0;
  102. }
  103. domain = iommu_domain_alloc(dev->bus);
  104. if (!domain) {
  105. dev_err(dev, "can't alloc iommu domain\n");
  106. return -ENOMEM;
  107. }
  108. iommu_set_fault_handler(domain, rproc_iommu_fault);
  109. ret = iommu_attach_device(domain, dev);
  110. if (ret) {
  111. dev_err(dev, "can't attach iommu device: %d\n", ret);
  112. goto free_domain;
  113. }
  114. rproc->domain = domain;
  115. return 0;
  116. free_domain:
  117. iommu_domain_free(domain);
  118. return ret;
  119. }
  120. static void rproc_disable_iommu(struct rproc *rproc)
  121. {
  122. struct iommu_domain *domain = rproc->domain;
  123. struct device *dev = rproc->dev;
  124. if (!domain)
  125. return;
  126. iommu_detach_device(domain, dev);
  127. iommu_domain_free(domain);
  128. return;
  129. }
  130. /*
  131. * Some remote processors will ask us to allocate them physically contiguous
  132. * memory regions (which we call "carveouts"), and map them to specific
  133. * device addresses (which are hardcoded in the firmware).
  134. *
  135. * They may then ask us to copy objects into specific device addresses (e.g.
  136. * code/data sections) or expose us certain symbols in other device address
  137. * (e.g. their trace buffer).
  138. *
  139. * This function is an internal helper with which we can go over the allocated
  140. * carveouts and translate specific device address to kernel virtual addresses
  141. * so we can access the referenced memory.
  142. *
  143. * Note: phys_to_virt(iommu_iova_to_phys(rproc->domain, da)) will work too,
  144. * but only on kernel direct mapped RAM memory. Instead, we're just using
  145. * here the output of the DMA API, which should be more correct.
  146. */
  147. static void *rproc_da_to_va(struct rproc *rproc, u64 da, int len)
  148. {
  149. struct rproc_mem_entry *carveout;
  150. void *ptr = NULL;
  151. list_for_each_entry(carveout, &rproc->carveouts, node) {
  152. int offset = da - carveout->da;
  153. /* try next carveout if da is too small */
  154. if (offset < 0)
  155. continue;
  156. /* try next carveout if da is too large */
  157. if (offset + len > carveout->len)
  158. continue;
  159. ptr = carveout->va + offset;
  160. break;
  161. }
  162. return ptr;
  163. }
  164. /**
  165. * rproc_load_segments() - load firmware segments to memory
  166. * @rproc: remote processor which will be booted using these fw segments
  167. * @elf_data: the content of the ELF firmware image
  168. * @len: firmware size (in bytes)
  169. *
  170. * This function loads the firmware segments to memory, where the remote
  171. * processor expects them.
  172. *
  173. * Some remote processors will expect their code and data to be placed
  174. * in specific device addresses, and can't have them dynamically assigned.
  175. *
  176. * We currently support only those kind of remote processors, and expect
  177. * the program header's paddr member to contain those addresses. We then go
  178. * through the physically contiguous "carveout" memory regions which we
  179. * allocated (and mapped) earlier on behalf of the remote processor,
  180. * and "translate" device address to kernel addresses, so we can copy the
  181. * segments where they are expected.
  182. *
  183. * Currently we only support remote processors that required carveout
  184. * allocations and got them mapped onto their iommus. Some processors
  185. * might be different: they might not have iommus, and would prefer to
  186. * directly allocate memory for every segment/resource. This is not yet
  187. * supported, though.
  188. */
  189. static int
  190. rproc_load_segments(struct rproc *rproc, const u8 *elf_data, size_t len)
  191. {
  192. struct device *dev = rproc->dev;
  193. struct elf32_hdr *ehdr;
  194. struct elf32_phdr *phdr;
  195. int i, ret = 0;
  196. ehdr = (struct elf32_hdr *)elf_data;
  197. phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
  198. /* go through the available ELF segments */
  199. for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
  200. u32 da = phdr->p_paddr;
  201. u32 memsz = phdr->p_memsz;
  202. u32 filesz = phdr->p_filesz;
  203. u32 offset = phdr->p_offset;
  204. void *ptr;
  205. if (phdr->p_type != PT_LOAD)
  206. continue;
  207. dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
  208. phdr->p_type, da, memsz, filesz);
  209. if (filesz > memsz) {
  210. dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
  211. filesz, memsz);
  212. ret = -EINVAL;
  213. break;
  214. }
  215. if (offset + filesz > len) {
  216. dev_err(dev, "truncated fw: need 0x%x avail 0x%x\n",
  217. offset + filesz, len);
  218. ret = -EINVAL;
  219. break;
  220. }
  221. /* grab the kernel address for this device address */
  222. ptr = rproc_da_to_va(rproc, da, memsz);
  223. if (!ptr) {
  224. dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
  225. ret = -EINVAL;
  226. break;
  227. }
  228. /* put the segment where the remote processor expects it */
  229. if (phdr->p_filesz)
  230. memcpy(ptr, elf_data + phdr->p_offset, filesz);
  231. /*
  232. * Zero out remaining memory for this segment.
  233. *
  234. * This isn't strictly required since dma_alloc_coherent already
  235. * did this for us. albeit harmless, we may consider removing
  236. * this.
  237. */
  238. if (memsz > filesz)
  239. memset(ptr + filesz, 0, memsz - filesz);
  240. }
  241. return ret;
  242. }
  243. /**
  244. * rproc_handle_early_vdev() - early handle a virtio header resource
  245. * @rproc: the remote processor
  246. * @rsc: the resource descriptor
  247. * @avail: size of available data (for sanity checking the image)
  248. *
  249. * The existence of this virtio hdr resource entry means that the firmware
  250. * of this @rproc supports this virtio device.
  251. *
  252. * Currently we support only a single virtio device of type VIRTIO_ID_RPMSG,
  253. * but the plan is to remove this limitation and support any number
  254. * of virtio devices (and of any type). We'll also add support for dynamically
  255. * adding (and removing) virtio devices over the rpmsg bus, but simple
  256. * firmwares that doesn't want to get involved with rpmsg will be able
  257. * to simply use the resource table for this.
  258. *
  259. * Returns 0 on success, or an appropriate error code otherwise
  260. */
  261. static int rproc_handle_early_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
  262. int avail)
  263. {
  264. struct rproc_vdev *rvdev;
  265. /* make sure resource isn't truncated */
  266. if (sizeof(*rsc) > avail) {
  267. dev_err(rproc->dev, "vdev rsc is truncated\n");
  268. return -EINVAL;
  269. }
  270. /* we only support VIRTIO_ID_RPMSG devices for now */
  271. if (rsc->id != VIRTIO_ID_RPMSG) {
  272. dev_warn(rproc->dev, "unsupported vdev: %d\n", rsc->id);
  273. return -EINVAL;
  274. }
  275. /* we only support a single vdev per rproc for now */
  276. if (rproc->rvdev) {
  277. dev_warn(rproc->dev, "redundant vdev entry\n");
  278. return -EINVAL;
  279. }
  280. rvdev = kzalloc(sizeof(struct rproc_vdev), GFP_KERNEL);
  281. if (!rvdev)
  282. return -ENOMEM;
  283. /* remember the device features */
  284. rvdev->dfeatures = rsc->dfeatures;
  285. rproc->rvdev = rvdev;
  286. rvdev->rproc = rproc;
  287. return 0;
  288. }
  289. /**
  290. * rproc_handle_vdev() - handle a vdev fw resource
  291. * @rproc: the remote processor
  292. * @rsc: the vring resource descriptor
  293. * @avail: size of available data (for sanity checking the image)
  294. *
  295. * This resource entry requires allocation of non-cacheable memory
  296. * for a virtio vring. Currently we only support two vrings per remote
  297. * processor, required for the virtio rpmsg device.
  298. *
  299. * The 'len' member of @rsc should contain the number of buffers this vring
  300. * support and 'da' should either contain the device address where
  301. * the remote processor is expecting the vring, or indicate that
  302. * dynamically allocation of the vring's device address is supported.
  303. *
  304. * Note: 'da' is currently not handled. This will be revised when the generic
  305. * iommu-based DMA API will arrive, or a dynanic & non-iommu use case show
  306. * up. Meanwhile, statically-addressed iommu-based images should use
  307. * RSC_DEVMEM resource entries to map their require 'da' to the physical
  308. * address of their base CMA region.
  309. *
  310. * Returns 0 on success, or an appropriate error code otherwise
  311. */
  312. static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
  313. int avail)
  314. {
  315. struct device *dev = rproc->dev;
  316. struct rproc_vdev *rvdev = rproc->rvdev;
  317. int i;
  318. /* make sure resource isn't truncated */
  319. if (sizeof(*rsc) + rsc->num_of_vrings * sizeof(struct fw_rsc_vdev_vring)
  320. + rsc->config_len > avail) {
  321. dev_err(rproc->dev, "vdev rsc is truncated\n");
  322. return -EINVAL;
  323. }
  324. /* make sure reserved bytes are zeroes */
  325. if (rsc->reserved[0] || rsc->reserved[1]) {
  326. dev_err(dev, "vdev rsc has non zero reserved bytes\n");
  327. return -EINVAL;
  328. }
  329. dev_dbg(dev, "vdev rsc: id %d, dfeatures %x, cfg len %d, %d vrings\n",
  330. rsc->id, rsc->dfeatures, rsc->config_len, rsc->num_of_vrings);
  331. /* no vdev is in place ? */
  332. if (!rvdev) {
  333. dev_err(dev, "vring requested without a virtio dev entry\n");
  334. return -EINVAL;
  335. }
  336. /* we currently support two vrings per rproc (for rx and tx) */
  337. if (rsc->num_of_vrings != ARRAY_SIZE(rvdev->vring)) {
  338. dev_err(dev, "too many vrings: %d\n", rsc->num_of_vrings);
  339. return -EINVAL;
  340. }
  341. /* initialize the vrings */
  342. for (i = 0; i < rsc->num_of_vrings; i++) {
  343. struct fw_rsc_vdev_vring *vring = &rsc->vring[i];
  344. dma_addr_t dma;
  345. int size;
  346. void *va;
  347. /* make sure reserved bytes are zeroes */
  348. if (vring->reserved) {
  349. dev_err(dev, "vring rsc has non zero reserved bytes\n");
  350. return -EINVAL;
  351. }
  352. /* the firmware must provide the expected queue size */
  353. if (!vring->num) {
  354. dev_err(dev, "missing expected queue size\n");
  355. /* potential cleanups are taken care of later on */
  356. return -EINVAL;
  357. }
  358. /* actual size of vring (in bytes) */
  359. size = PAGE_ALIGN(vring_size(vring->num, AMP_VRING_ALIGN));
  360. /*
  361. * Allocate non-cacheable memory for the vring. In the future
  362. * this call will also configure the IOMMU for us
  363. */
  364. va = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);
  365. if (!va) {
  366. dev_err(dev, "dma_alloc_coherent failed\n");
  367. /* potential cleanups are taken care of later on */
  368. return -EINVAL;
  369. }
  370. dev_dbg(dev, "vring%d: va %p dma %x qsz %d ring size %x\n", i,
  371. va, dma, vring->num, size);
  372. rvdev->vring[i].len = vring->num;
  373. rvdev->vring[i].va = va;
  374. rvdev->vring[i].dma = dma;
  375. }
  376. return 0;
  377. }
  378. /**
  379. * rproc_handle_trace() - handle a shared trace buffer resource
  380. * @rproc: the remote processor
  381. * @rsc: the trace resource descriptor
  382. * @avail: size of available data (for sanity checking the image)
  383. *
  384. * In case the remote processor dumps trace logs into memory,
  385. * export it via debugfs.
  386. *
  387. * Currently, the 'da' member of @rsc should contain the device address
  388. * where the remote processor is dumping the traces. Later we could also
  389. * support dynamically allocating this address using the generic
  390. * DMA API (but currently there isn't a use case for that).
  391. *
  392. * Returns 0 on success, or an appropriate error code otherwise
  393. */
  394. static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
  395. int avail)
  396. {
  397. struct rproc_mem_entry *trace;
  398. struct device *dev = rproc->dev;
  399. void *ptr;
  400. char name[15];
  401. if (sizeof(*rsc) > avail) {
  402. dev_err(rproc->dev, "trace rsc is truncated\n");
  403. return -EINVAL;
  404. }
  405. /* make sure reserved bytes are zeroes */
  406. if (rsc->reserved) {
  407. dev_err(dev, "trace rsc has non zero reserved bytes\n");
  408. return -EINVAL;
  409. }
  410. /* what's the kernel address of this resource ? */
  411. ptr = rproc_da_to_va(rproc, rsc->da, rsc->len);
  412. if (!ptr) {
  413. dev_err(dev, "erroneous trace resource entry\n");
  414. return -EINVAL;
  415. }
  416. trace = kzalloc(sizeof(*trace), GFP_KERNEL);
  417. if (!trace) {
  418. dev_err(dev, "kzalloc trace failed\n");
  419. return -ENOMEM;
  420. }
  421. /* set the trace buffer dma properties */
  422. trace->len = rsc->len;
  423. trace->va = ptr;
  424. /* make sure snprintf always null terminates, even if truncating */
  425. snprintf(name, sizeof(name), "trace%d", rproc->num_traces);
  426. /* create the debugfs entry */
  427. trace->priv = rproc_create_trace_file(name, rproc, trace);
  428. if (!trace->priv) {
  429. trace->va = NULL;
  430. kfree(trace);
  431. return -EINVAL;
  432. }
  433. list_add_tail(&trace->node, &rproc->traces);
  434. rproc->num_traces++;
  435. dev_dbg(dev, "%s added: va %p, da 0x%x, len 0x%x\n", name, ptr,
  436. rsc->da, rsc->len);
  437. return 0;
  438. }
  439. /**
  440. * rproc_handle_devmem() - handle devmem resource entry
  441. * @rproc: remote processor handle
  442. * @rsc: the devmem resource entry
  443. * @avail: size of available data (for sanity checking the image)
  444. *
  445. * Remote processors commonly need to access certain on-chip peripherals.
  446. *
  447. * Some of these remote processors access memory via an iommu device,
  448. * and might require us to configure their iommu before they can access
  449. * the on-chip peripherals they need.
  450. *
  451. * This resource entry is a request to map such a peripheral device.
  452. *
  453. * These devmem entries will contain the physical address of the device in
  454. * the 'pa' member. If a specific device address is expected, then 'da' will
  455. * contain it (currently this is the only use case supported). 'len' will
  456. * contain the size of the physical region we need to map.
  457. *
  458. * Currently we just "trust" those devmem entries to contain valid physical
  459. * addresses, but this is going to change: we want the implementations to
  460. * tell us ranges of physical addresses the firmware is allowed to request,
  461. * and not allow firmwares to request access to physical addresses that
  462. * are outside those ranges.
  463. */
  464. static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc,
  465. int avail)
  466. {
  467. struct rproc_mem_entry *mapping;
  468. int ret;
  469. /* no point in handling this resource without a valid iommu domain */
  470. if (!rproc->domain)
  471. return -EINVAL;
  472. if (sizeof(*rsc) > avail) {
  473. dev_err(rproc->dev, "devmem rsc is truncated\n");
  474. return -EINVAL;
  475. }
  476. /* make sure reserved bytes are zeroes */
  477. if (rsc->reserved) {
  478. dev_err(rproc->dev, "devmem rsc has non zero reserved bytes\n");
  479. return -EINVAL;
  480. }
  481. mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
  482. if (!mapping) {
  483. dev_err(rproc->dev, "kzalloc mapping failed\n");
  484. return -ENOMEM;
  485. }
  486. ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags);
  487. if (ret) {
  488. dev_err(rproc->dev, "failed to map devmem: %d\n", ret);
  489. goto out;
  490. }
  491. /*
  492. * We'll need this info later when we'll want to unmap everything
  493. * (e.g. on shutdown).
  494. *
  495. * We can't trust the remote processor not to change the resource
  496. * table, so we must maintain this info independently.
  497. */
  498. mapping->da = rsc->da;
  499. mapping->len = rsc->len;
  500. list_add_tail(&mapping->node, &rproc->mappings);
  501. dev_dbg(rproc->dev, "mapped devmem pa 0x%x, da 0x%x, len 0x%x\n",
  502. rsc->pa, rsc->da, rsc->len);
  503. return 0;
  504. out:
  505. kfree(mapping);
  506. return ret;
  507. }
  508. /**
  509. * rproc_handle_carveout() - handle phys contig memory allocation requests
  510. * @rproc: rproc handle
  511. * @rsc: the resource entry
  512. * @avail: size of available data (for image validation)
  513. *
  514. * This function will handle firmware requests for allocation of physically
  515. * contiguous memory regions.
  516. *
  517. * These request entries should come first in the firmware's resource table,
  518. * as other firmware entries might request placing other data objects inside
  519. * these memory regions (e.g. data/code segments, trace resource entries, ...).
  520. *
  521. * Allocating memory this way helps utilizing the reserved physical memory
  522. * (e.g. CMA) more efficiently, and also minimizes the number of TLB entries
  523. * needed to map it (in case @rproc is using an IOMMU). Reducing the TLB
  524. * pressure is important; it may have a substantial impact on performance.
  525. */
  526. static int rproc_handle_carveout(struct rproc *rproc,
  527. struct fw_rsc_carveout *rsc, int avail)
  528. {
  529. struct rproc_mem_entry *carveout, *mapping;
  530. struct device *dev = rproc->dev;
  531. dma_addr_t dma;
  532. void *va;
  533. int ret;
  534. if (sizeof(*rsc) > avail) {
  535. dev_err(rproc->dev, "carveout rsc is truncated\n");
  536. return -EINVAL;
  537. }
  538. /* make sure reserved bytes are zeroes */
  539. if (rsc->reserved) {
  540. dev_err(dev, "carveout rsc has non zero reserved bytes\n");
  541. return -EINVAL;
  542. }
  543. dev_dbg(dev, "carveout rsc: da %x, pa %x, len %x, flags %x\n",
  544. rsc->da, rsc->pa, rsc->len, rsc->flags);
  545. mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
  546. if (!mapping) {
  547. dev_err(dev, "kzalloc mapping failed\n");
  548. return -ENOMEM;
  549. }
  550. carveout = kzalloc(sizeof(*carveout), GFP_KERNEL);
  551. if (!carveout) {
  552. dev_err(dev, "kzalloc carveout failed\n");
  553. ret = -ENOMEM;
  554. goto free_mapping;
  555. }
  556. va = dma_alloc_coherent(dev, rsc->len, &dma, GFP_KERNEL);
  557. if (!va) {
  558. dev_err(dev, "failed to dma alloc carveout: %d\n", rsc->len);
  559. ret = -ENOMEM;
  560. goto free_carv;
  561. }
  562. dev_dbg(dev, "carveout va %p, dma %x, len 0x%x\n", va, dma, rsc->len);
  563. /*
  564. * Ok, this is non-standard.
  565. *
  566. * Sometimes we can't rely on the generic iommu-based DMA API
  567. * to dynamically allocate the device address and then set the IOMMU
  568. * tables accordingly, because some remote processors might
  569. * _require_ us to use hard coded device addresses that their
  570. * firmware was compiled with.
  571. *
  572. * In this case, we must use the IOMMU API directly and map
  573. * the memory to the device address as expected by the remote
  574. * processor.
  575. *
  576. * Obviously such remote processor devices should not be configured
  577. * to use the iommu-based DMA API: we expect 'dma' to contain the
  578. * physical address in this case.
  579. */
  580. if (rproc->domain) {
  581. ret = iommu_map(rproc->domain, rsc->da, dma, rsc->len,
  582. rsc->flags);
  583. if (ret) {
  584. dev_err(dev, "iommu_map failed: %d\n", ret);
  585. goto dma_free;
  586. }
  587. /*
  588. * We'll need this info later when we'll want to unmap
  589. * everything (e.g. on shutdown).
  590. *
  591. * We can't trust the remote processor not to change the
  592. * resource table, so we must maintain this info independently.
  593. */
  594. mapping->da = rsc->da;
  595. mapping->len = rsc->len;
  596. list_add_tail(&mapping->node, &rproc->mappings);
  597. dev_dbg(dev, "carveout mapped 0x%x to 0x%x\n", rsc->da, dma);
  598. /*
  599. * Some remote processors might need to know the pa
  600. * even though they are behind an IOMMU. E.g., OMAP4's
  601. * remote M3 processor needs this so it can control
  602. * on-chip hardware accelerators that are not behind
  603. * the IOMMU, and therefor must know the pa.
  604. *
  605. * Generally we don't want to expose physical addresses
  606. * if we don't have to (remote processors are generally
  607. * _not_ trusted), so we might want to do this only for
  608. * remote processor that _must_ have this (e.g. OMAP4's
  609. * dual M3 subsystem).
  610. */
  611. rsc->pa = dma;
  612. }
  613. carveout->va = va;
  614. carveout->len = rsc->len;
  615. carveout->dma = dma;
  616. carveout->da = rsc->da;
  617. list_add_tail(&carveout->node, &rproc->carveouts);
  618. return 0;
  619. dma_free:
  620. dma_free_coherent(dev, rsc->len, va, dma);
  621. free_carv:
  622. kfree(carveout);
  623. free_mapping:
  624. kfree(mapping);
  625. return ret;
  626. }
/*
 * A lookup table for resource handlers. The indices are defined in
 * enum fw_resource_type.
 *
 * Entries are cast to the generic rproc_handle_resource_t signature;
 * rproc_handle_boot_rsc() dispatches through this table by hdr->type.
 */
static rproc_handle_resource_t rproc_handle_rsc[] = {
	[RSC_CARVEOUT] = (rproc_handle_resource_t)rproc_handle_carveout,
	[RSC_DEVMEM] = (rproc_handle_resource_t)rproc_handle_devmem,
	[RSC_TRACE] = (rproc_handle_resource_t)rproc_handle_trace,
	[RSC_VDEV] = (rproc_handle_resource_t)rproc_handle_vdev,
};
  637. /* handle firmware resource entries before booting the remote processor */
  638. static int
  639. rproc_handle_boot_rsc(struct rproc *rproc, struct resource_table *table, int len)
  640. {
  641. struct device *dev = rproc->dev;
  642. rproc_handle_resource_t handler;
  643. int ret = 0, i;
  644. for (i = 0; i < table->num; i++) {
  645. int offset = table->offset[i];
  646. struct fw_rsc_hdr *hdr = (void *)table + offset;
  647. int avail = len - offset - sizeof(*hdr);
  648. void *rsc = (void *)hdr + sizeof(*hdr);
  649. /* make sure table isn't truncated */
  650. if (avail < 0) {
  651. dev_err(dev, "rsc table is truncated\n");
  652. return -EINVAL;
  653. }
  654. dev_dbg(dev, "rsc: type %d\n", hdr->type);
  655. if (hdr->type >= RSC_LAST) {
  656. dev_warn(dev, "unsupported resource %d\n", hdr->type);
  657. continue;
  658. }
  659. handler = rproc_handle_rsc[hdr->type];
  660. if (!handler)
  661. continue;
  662. ret = handler(rproc, rsc, avail);
  663. if (ret)
  664. break;
  665. }
  666. return ret;
  667. }
  668. /* handle firmware resource entries while registering the remote processor */
  669. static int
  670. rproc_handle_virtio_rsc(struct rproc *rproc, struct resource_table *table, int len)
  671. {
  672. struct device *dev = rproc->dev;
  673. int ret = 0, i;
  674. for (i = 0; i < table->num; i++) {
  675. int offset = table->offset[i];
  676. struct fw_rsc_hdr *hdr = (void *)table + offset;
  677. int avail = len - offset - sizeof(*hdr);
  678. /* make sure table isn't truncated */
  679. if (avail < 0) {
  680. dev_err(dev, "rsc table is truncated\n");
  681. return -EINVAL;
  682. }
  683. dev_dbg(dev, "%s: rsc type %d\n", __func__, hdr->type);
  684. if (hdr->type == RSC_VDEV) {
  685. struct fw_rsc_vdev *vrsc =
  686. (struct fw_rsc_vdev *)hdr->data;
  687. ret = rproc_handle_early_vdev(rproc, vrsc, avail);
  688. break;
  689. }
  690. }
  691. return ret;
  692. }
  693. /**
  694. * rproc_handle_resources() - find and handle the resource table
  695. * @rproc: the rproc handle
  696. * @elf_data: the content of the ELF firmware image
  697. * @len: firmware size (in bytes)
  698. * @handler: function that should be used to handle the resource table
  699. *
  700. * This function finds the resource table inside the remote processor's
  701. * firmware, and invoke a user-supplied handler with it (we have two
  702. * possible handlers: one is invoked upon registration of @rproc,
 * in order to register the supported virtio devices, and the other is
  704. * invoked when @rproc is actually booted).
  705. *
  706. * Currently this function fails if a resource table doesn't exist.
  707. * This restriction will be removed when we'll start supporting remote
  708. * processors that don't need a resource table.
  709. */
static int rproc_handle_resources(struct rproc *rproc, const u8 *elf_data,
				size_t len, rproc_handle_resources_t handler)
{
	struct elf32_hdr *ehdr;
	struct elf32_shdr *shdr;
	const char *name_table;
	struct device *dev = rproc->dev;
	/* -EINVAL is the result if no ".resource_table" section is found */
	int i, ret = -EINVAL;
	struct resource_table *table;

	ehdr = (struct elf32_hdr *)elf_data;
	/*
	 * NOTE(review): e_shoff and e_shstrndx come straight from the image;
	 * the sanity check only verified that the *first* section header
	 * fits in the firmware, so a malformed section table could still
	 * send these pointers out of bounds — TODO confirm/harden.
	 */
	shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff);
	name_table = elf_data + shdr[ehdr->e_shstrndx].sh_offset;

	/* look for the resource table and handle it */
	for (i = 0; i < ehdr->e_shnum; i++, shdr++) {
		int size = shdr->sh_size;
		int offset = shdr->sh_offset;

		if (strcmp(name_table + shdr->sh_name, ".resource_table"))
			continue;

		table = (struct resource_table *)(elf_data + offset);

		/* make sure we have the entire table */
		if (offset + size > len) {
			dev_err(dev, "resource table truncated\n");
			return -EINVAL;
		}

		/* make sure table has at least the header */
		if (sizeof(struct resource_table) > size) {
			dev_err(dev, "header-less resource table\n");
			return -EINVAL;
		}

		/* we don't support any version beyond the first */
		if (table->ver != 1) {
			dev_err(dev, "unsupported fw ver: %d\n", table->ver);
			return -EINVAL;
		}

		/* make sure reserved bytes are zeroes */
		if (table->reserved[0] || table->reserved[1]) {
			dev_err(dev, "non zero reserved bytes\n");
			return -EINVAL;
		}

		/* make sure the offsets array isn't truncated */
		if (table->num * sizeof(table->offset[0]) +
				sizeof(struct resource_table) > size) {
			dev_err(dev, "resource table incomplete\n");
			return -EINVAL;
		}

		/* sh_size equals the local 'size'; pass it to the handler */
		ret = handler(rproc, table, shdr->sh_size);
		break;
	}

	return ret;
}
  760. /**
  761. * rproc_resource_cleanup() - clean up and free all acquired resources
  762. * @rproc: rproc handle
  763. *
  764. * This function will free all resources acquired for @rproc, and it
  765. * is called when @rproc shuts down, or just failed booting.
  766. */
  767. static void rproc_resource_cleanup(struct rproc *rproc)
  768. {
  769. struct rproc_mem_entry *entry, *tmp;
  770. struct device *dev = rproc->dev;
  771. struct rproc_vdev *rvdev = rproc->rvdev;
  772. int i;
  773. /* clean up debugfs trace entries */
  774. list_for_each_entry_safe(entry, tmp, &rproc->traces, node) {
  775. rproc_remove_trace_file(entry->priv);
  776. rproc->num_traces--;
  777. list_del(&entry->node);
  778. kfree(entry);
  779. }
  780. /* free the coherent memory allocated for the vrings */
  781. for (i = 0; rvdev && i < ARRAY_SIZE(rvdev->vring); i++) {
  782. int qsz = rvdev->vring[i].len;
  783. void *va = rvdev->vring[i].va;
  784. int dma = rvdev->vring[i].dma;
  785. /* virtqueue size is expressed in number of buffers supported */
  786. if (qsz) {
  787. /* how many bytes does this vring really occupy ? */
  788. int size = PAGE_ALIGN(vring_size(qsz, AMP_VRING_ALIGN));
  789. dma_free_coherent(rproc->dev, size, va, dma);
  790. rvdev->vring[i].len = 0;
  791. }
  792. }
  793. /* clean up carveout allocations */
  794. list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
  795. dma_free_coherent(dev, entry->len, entry->va, entry->dma);
  796. list_del(&entry->node);
  797. kfree(entry);
  798. }
  799. /* clean up iommu mapping entries */
  800. list_for_each_entry_safe(entry, tmp, &rproc->mappings, node) {
  801. size_t unmapped;
  802. unmapped = iommu_unmap(rproc->domain, entry->da, entry->len);
  803. if (unmapped != entry->len) {
  804. /* nothing much to do besides complaining */
  805. dev_err(dev, "failed to unmap %u/%u\n", entry->len,
  806. unmapped);
  807. }
  808. list_del(&entry->node);
  809. kfree(entry);
  810. }
  811. }
  812. /* make sure this fw image is sane */
  813. static int rproc_fw_sanity_check(struct rproc *rproc, const struct firmware *fw)
  814. {
  815. const char *name = rproc->firmware;
  816. struct device *dev = rproc->dev;
  817. struct elf32_hdr *ehdr;
  818. char class;
  819. if (!fw) {
  820. dev_err(dev, "failed to load %s\n", name);
  821. return -EINVAL;
  822. }
  823. if (fw->size < sizeof(struct elf32_hdr)) {
  824. dev_err(dev, "Image is too small\n");
  825. return -EINVAL;
  826. }
  827. ehdr = (struct elf32_hdr *)fw->data;
  828. /* We only support ELF32 at this point */
  829. class = ehdr->e_ident[EI_CLASS];
  830. if (class != ELFCLASS32) {
  831. dev_err(dev, "Unsupported class: %d\n", class);
  832. return -EINVAL;
  833. }
  834. /* We assume the firmware has the same endianess as the host */
  835. # ifdef __LITTLE_ENDIAN
  836. if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) {
  837. # else /* BIG ENDIAN */
  838. if (ehdr->e_ident[EI_DATA] != ELFDATA2MSB) {
  839. # endif
  840. dev_err(dev, "Unsupported firmware endianess\n");
  841. return -EINVAL;
  842. }
  843. if (fw->size < ehdr->e_shoff + sizeof(struct elf32_shdr)) {
  844. dev_err(dev, "Image is too small\n");
  845. return -EINVAL;
  846. }
  847. if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
  848. dev_err(dev, "Image is corrupted (bad magic)\n");
  849. return -EINVAL;
  850. }
  851. if (ehdr->e_phnum == 0) {
  852. dev_err(dev, "No loadable segments\n");
  853. return -EINVAL;
  854. }
  855. if (ehdr->e_phoff > fw->size) {
  856. dev_err(dev, "Firmware size is too small\n");
  857. return -EINVAL;
  858. }
  859. return 0;
  860. }
  861. /*
  862. * take a firmware and boot a remote processor with it.
  863. */
  864. static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
  865. {
  866. struct device *dev = rproc->dev;
  867. const char *name = rproc->firmware;
  868. struct elf32_hdr *ehdr;
  869. int ret;
  870. ret = rproc_fw_sanity_check(rproc, fw);
  871. if (ret)
  872. return ret;
  873. ehdr = (struct elf32_hdr *)fw->data;
  874. dev_info(dev, "Booting fw image %s, size %d\n", name, fw->size);
  875. /*
  876. * if enabling an IOMMU isn't relevant for this rproc, this is
  877. * just a nop
  878. */
  879. ret = rproc_enable_iommu(rproc);
  880. if (ret) {
  881. dev_err(dev, "can't enable iommu: %d\n", ret);
  882. return ret;
  883. }
  884. /*
  885. * The ELF entry point is the rproc's boot addr (though this is not
  886. * a configurable property of all remote processors: some will always
  887. * boot at a specific hardcoded address).
  888. */
  889. rproc->bootaddr = ehdr->e_entry;
  890. /* handle fw resources which are required to boot rproc */
  891. ret = rproc_handle_resources(rproc, fw->data, fw->size,
  892. rproc_handle_boot_rsc);
  893. if (ret) {
  894. dev_err(dev, "Failed to process resources: %d\n", ret);
  895. goto clean_up;
  896. }
  897. /* load the ELF segments to memory */
  898. ret = rproc_load_segments(rproc, fw->data, fw->size);
  899. if (ret) {
  900. dev_err(dev, "Failed to load program segments: %d\n", ret);
  901. goto clean_up;
  902. }
  903. /* power up the remote processor */
  904. ret = rproc->ops->start(rproc);
  905. if (ret) {
  906. dev_err(dev, "can't start rproc %s: %d\n", rproc->name, ret);
  907. goto clean_up;
  908. }
  909. rproc->state = RPROC_RUNNING;
  910. dev_info(dev, "remote processor %s is now up\n", rproc->name);
  911. return 0;
  912. clean_up:
  913. rproc_resource_cleanup(rproc);
  914. rproc_disable_iommu(rproc);
  915. return ret;
  916. }
  917. /*
  918. * take a firmware and look for virtio devices to register.
  919. *
  920. * Note: this function is called asynchronously upon registration of the
  921. * remote processor (so we must wait until it completes before we try
 * to unregister the device. One other option is just to use kref here,
  923. * that might be cleaner).
  924. */
  925. static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
  926. {
  927. struct rproc *rproc = context;
  928. struct device *dev = rproc->dev;
  929. int ret;
  930. if (rproc_fw_sanity_check(rproc, fw) < 0)
  931. goto out;
  932. /* does the fw support any virtio devices ? */
  933. ret = rproc_handle_resources(rproc, fw->data, fw->size,
  934. rproc_handle_virtio_rsc);
  935. if (ret) {
  936. dev_info(dev, "No fw virtio device was found\n");
  937. goto out;
  938. }
  939. /* add the virtio device (currently only rpmsg vdevs are supported) */
  940. ret = rproc_add_rpmsg_vdev(rproc);
  941. if (ret)
  942. goto out;
  943. out:
  944. if (fw)
  945. release_firmware(fw);
  946. /* allow rproc_unregister() contexts, if any, to proceed */
  947. complete_all(&rproc->firmware_loading_complete);
  948. }
/**
 * rproc_boot() - boot a remote processor
 * @rproc: handle of a remote processor
 *
 * Boot a remote processor (i.e. load its firmware, power it on, ...).
 *
 * If the remote processor is already powered on, this function immediately
 * returns (successfully).
 *
 * Returns 0 on success, and an appropriate error value otherwise.
 */
int rproc_boot(struct rproc *rproc)
{
	const struct firmware *firmware_p;
	struct device *dev;
	int ret;

	if (!rproc) {
		pr_err("invalid rproc handle\n");
		return -EINVAL;
	}

	dev = rproc->dev;

	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return ret;
	}

	/* loading a firmware is required */
	if (!rproc->firmware) {
		dev_err(dev, "%s: no firmware to load\n", __func__);
		ret = -EINVAL;
		goto unlock_mutex;
	}

	/* prevent underlying implementation from being removed */
	if (!try_module_get(dev->driver->owner)) {
		dev_err(dev, "%s: can't get owner\n", __func__);
		ret = -EINVAL;
		goto unlock_mutex;
	}

	/*
	 * skip the boot process if rproc is already powered up.
	 * Note: the module reference taken above is intentionally kept on
	 * this path — every successful rproc_boot() holds one module ref
	 * and one power ref, each balanced by a later rproc_shutdown().
	 */
	if (atomic_inc_return(&rproc->power) > 1) {
		ret = 0;
		goto unlock_mutex;
	}

	dev_info(dev, "powering up %s\n", rproc->name);

	/* load firmware */
	ret = request_firmware(&firmware_p, rproc->firmware, dev);
	if (ret < 0) {
		dev_err(dev, "request_firmware failed: %d\n", ret);
		goto downref_rproc;
	}

	ret = rproc_fw_boot(rproc, firmware_p);

	release_firmware(firmware_p);

downref_rproc:
	/* on failure, undo both the module ref and the power increment */
	if (ret) {
		module_put(dev->driver->owner);
		atomic_dec(&rproc->power);
	}
unlock_mutex:
	mutex_unlock(&rproc->lock);
	return ret;
}
EXPORT_SYMBOL(rproc_boot);
/**
 * rproc_shutdown() - power off the remote processor
 * @rproc: the remote processor
 *
 * Power off a remote processor (previously booted with rproc_boot()).
 *
 * In case @rproc is still being used by an additional user(s), then
 * this function will just decrement the power refcount and exit,
 * without really powering off the device.
 *
 * Every call to rproc_boot() must (eventually) be accompanied by a call
 * to rproc_shutdown(). Calling rproc_shutdown() redundantly is a bug.
 *
 * Notes:
 * - we're not decrementing the rproc's refcount, only the power refcount.
 * which means that the @rproc handle stays valid even after rproc_shutdown()
 * returns, and users can still use it with a subsequent rproc_boot(), if
 * needed.
 * - don't call rproc_shutdown() to unroll rproc_get_by_name(), exactly
 * because rproc_shutdown() _does not_ decrement the refcount of @rproc.
 * To decrement the refcount of @rproc, use rproc_put() (but _only_ if
 * you acquired @rproc using rproc_get_by_name()).
 */
void rproc_shutdown(struct rproc *rproc)
{
	struct device *dev = rproc->dev;
	int ret;

	/*
	 * NOTE(review): if a signal interrupts the lock acquisition we
	 * return without decrementing the power refcount, so a killed
	 * shutdown silently leaks one boot reference — consider an
	 * uninterruptible mutex_lock() here; TODO confirm intent.
	 */
	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return;
	}

	/* if the remote proc is still needed, bail out */
	if (!atomic_dec_and_test(&rproc->power))
		goto out;

	/* power off the remote processor */
	ret = rproc->ops->stop(rproc);
	if (ret) {
		/* restore the power count: the processor is still running */
		atomic_inc(&rproc->power);
		dev_err(dev, "can't stop rproc: %d\n", ret);
		goto out;
	}

	/* clean up all acquired resources */
	rproc_resource_cleanup(rproc);

	rproc_disable_iommu(rproc);

	rproc->state = RPROC_OFFLINE;

	dev_info(dev, "stopped remote processor %s\n", rproc->name);

out:
	mutex_unlock(&rproc->lock);
	/*
	 * on success (including the "still needed" early-out, where
	 * ret is 0) drop the module ref taken by the matching rproc_boot()
	 */
	if (!ret)
		module_put(dev->driver->owner);
}
EXPORT_SYMBOL(rproc_shutdown);
/**
 * rproc_release() - completely deletes the existence of a remote processor
 * @kref: the rproc's kref
 *
 * This function should _never_ be called directly.
 *
 * The only reasonable location to use it is as an argument when kref_put'ing
 * @rproc's refcount.
 *
 * This way it will be called when no one holds a valid pointer to this @rproc
 * anymore (and obviously after it is removed from the rprocs klist).
 *
 * Note: this function is not static because rproc_vdev_release() needs it when
 * it decrements @rproc's refcount.
 */
void rproc_release(struct kref *kref)
{
	struct rproc *rproc = container_of(kref, struct rproc, refcount);

	dev_info(rproc->dev, "removing %s\n", rproc->name);

	/* tear down the debugfs entries before the rproc memory goes away */
	rproc_delete_debug_dir(rproc);

	/* at this point no one holds a reference to rproc anymore */
	kfree(rproc);
}
  1087. /* will be called when an rproc is added to the rprocs klist */
  1088. static void klist_rproc_get(struct klist_node *n)
  1089. {
  1090. struct rproc *rproc = container_of(n, struct rproc, node);
  1091. kref_get(&rproc->refcount);
  1092. }
  1093. /* will be called when an rproc is removed from the rprocs klist */
  1094. static void klist_rproc_put(struct klist_node *n)
  1095. {
  1096. struct rproc *rproc = container_of(n, struct rproc, node);
  1097. kref_put(&rproc->refcount, rproc_release);
  1098. }
  1099. static struct rproc *next_rproc(struct klist_iter *i)
  1100. {
  1101. struct klist_node *n;
  1102. n = klist_next(i);
  1103. if (!n)
  1104. return NULL;
  1105. return container_of(n, struct rproc, node);
  1106. }
  1107. /**
  1108. * rproc_get_by_name() - find a remote processor by name and boot it
  1109. * @name: name of the remote processor
  1110. *
  1111. * Finds an rproc handle using the remote processor's name, and then
  1112. * boot it. If it's already powered on, then just immediately return
  1113. * (successfully).
  1114. *
  1115. * Returns the rproc handle on success, and NULL on failure.
  1116. *
  1117. * This function increments the remote processor's refcount, so always
  1118. * use rproc_put() to decrement it back once rproc isn't needed anymore.
  1119. *
  1120. * Note: currently this function (and its counterpart rproc_put()) are not
  1121. * used anymore by the rpmsg subsystem. We need to scrutinize the use cases
  1122. * that still need them, and see if we can migrate them to use the non
  1123. * name-based boot/shutdown interface.
  1124. */
  1125. struct rproc *rproc_get_by_name(const char *name)
  1126. {
  1127. struct rproc *rproc;
  1128. struct klist_iter i;
  1129. int ret;
  1130. /* find the remote processor, and upref its refcount */
  1131. klist_iter_init(&rprocs, &i);
  1132. while ((rproc = next_rproc(&i)) != NULL)
  1133. if (!strcmp(rproc->name, name)) {
  1134. kref_get(&rproc->refcount);
  1135. break;
  1136. }
  1137. klist_iter_exit(&i);
  1138. /* can't find this rproc ? */
  1139. if (!rproc) {
  1140. pr_err("can't find remote processor %s\n", name);
  1141. return NULL;
  1142. }
  1143. ret = rproc_boot(rproc);
  1144. if (ret < 0) {
  1145. kref_put(&rproc->refcount, rproc_release);
  1146. return NULL;
  1147. }
  1148. return rproc;
  1149. }
  1150. EXPORT_SYMBOL(rproc_get_by_name);
/**
 * rproc_put() - decrement the refcount of a remote processor, and shut it down
 * @rproc: the remote processor
 *
 * This function tries to shutdown @rproc, and it then decrements its
 * refcount.
 *
 * After this function returns, @rproc may _not_ be used anymore, and its
 * handle should be considered invalid.
 *
 * This function should be called _iff_ the @rproc handle was grabbed by
 * calling rproc_get_by_name().
 */
void rproc_put(struct rproc *rproc)
{
	/* try to power off the remote processor */
	rproc_shutdown(rproc);

	/* downref rproc's refcount (may free it via rproc_release) */
	kref_put(&rproc->refcount, rproc_release);
}
EXPORT_SYMBOL(rproc_put);
/**
 * rproc_register() - register a remote processor
 * @rproc: the remote processor handle to register
 *
 * Registers @rproc with the remoteproc framework, after it has been
 * allocated with rproc_alloc().
 *
 * This is called by the platform-specific rproc implementation, whenever
 * a new remote processor device is probed.
 *
 * Returns 0 on success and an appropriate error code otherwise.
 *
 * Note: this function initiates an asynchronous firmware loading
 * context, which will look for virtio devices supported by the rproc's
 * firmware.
 *
 * If found, those virtio devices will be created and added, so as a result
 * of registering this remote processor, additional virtio drivers will be
 * probed.
 *
 * Currently, though, we only support a single RPMSG virtio vdev per remote
 * processor.
 */
int rproc_register(struct rproc *rproc)
{
	struct device *dev = rproc->dev;
	int ret = 0;

	/* expose to rproc_get_by_name users */
	klist_add_tail(&rproc->node, &rprocs);

	dev_info(rproc->dev, "%s is available\n", rproc->name);

	dev_info(dev, "Note: remoteproc is still under development and considered experimental.\n");
	dev_info(dev, "THE BINARY FORMAT IS NOT YET FINALIZED, and backward compatibility isn't yet guaranteed.\n");

	/* create debugfs entries */
	rproc_create_debug_dir(rproc);

	/* rproc_unregister() calls must wait until async loader completes */
	init_completion(&rproc->firmware_loading_complete);

	/*
	 * We must retrieve early virtio configuration info from
	 * the firmware (e.g. whether to register a virtio rpmsg device,
	 * what virtio features does it support, ...).
	 *
	 * We're initiating an asynchronous firmware loading, so we can
	 * be built-in kernel code, without hanging the boot process.
	 */
	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
					rproc->firmware, dev, GFP_KERNEL,
					rproc, rproc_fw_config_virtio);
	if (ret < 0) {
		dev_err(dev, "request_firmware_nowait failed: %d\n", ret);
		complete_all(&rproc->firmware_loading_complete);
		/*
		 * NOTE(review): the debugfs dir created above is not removed
		 * on this failure path; it is only freed if the rproc's final
		 * kref_put runs rproc_release — if the caller just does
		 * rproc_free() afterwards, the debugfs entry leaks. TODO
		 * confirm and consider rproc_delete_debug_dir() here.
		 */
		klist_remove(&rproc->node);
	}

	return ret;
}
EXPORT_SYMBOL(rproc_register);
  1227. /**
  1228. * rproc_alloc() - allocate a remote processor handle
  1229. * @dev: the underlying device
  1230. * @name: name of this remote processor
  1231. * @ops: platform-specific handlers (mainly start/stop)
  1232. * @firmware: name of firmware file to load
  1233. * @len: length of private data needed by the rproc driver (in bytes)
  1234. *
  1235. * Allocates a new remote processor handle, but does not register
  1236. * it yet.
  1237. *
  1238. * This function should be used by rproc implementations during initialization
  1239. * of the remote processor.
  1240. *
  1241. * After creating an rproc handle using this function, and when ready,
  1242. * implementations should then call rproc_register() to complete
  1243. * the registration of the remote processor.
  1244. *
  1245. * On success the new rproc is returned, and on failure, NULL.
  1246. *
  1247. * Note: _never_ directly deallocate @rproc, even if it was not registered
  1248. * yet. Instead, if you just need to unroll rproc_alloc(), use rproc_free().
  1249. */
  1250. struct rproc *rproc_alloc(struct device *dev, const char *name,
  1251. const struct rproc_ops *ops,
  1252. const char *firmware, int len)
  1253. {
  1254. struct rproc *rproc;
  1255. if (!dev || !name || !ops)
  1256. return NULL;
  1257. rproc = kzalloc(sizeof(struct rproc) + len, GFP_KERNEL);
  1258. if (!rproc) {
  1259. dev_err(dev, "%s: kzalloc failed\n", __func__);
  1260. return NULL;
  1261. }
  1262. rproc->dev = dev;
  1263. rproc->name = name;
  1264. rproc->ops = ops;
  1265. rproc->firmware = firmware;
  1266. rproc->priv = &rproc[1];
  1267. atomic_set(&rproc->power, 0);
  1268. kref_init(&rproc->refcount);
  1269. mutex_init(&rproc->lock);
  1270. INIT_LIST_HEAD(&rproc->carveouts);
  1271. INIT_LIST_HEAD(&rproc->mappings);
  1272. INIT_LIST_HEAD(&rproc->traces);
  1273. rproc->state = RPROC_OFFLINE;
  1274. return rproc;
  1275. }
  1276. EXPORT_SYMBOL(rproc_alloc);
/**
 * rproc_free() - free an rproc handle that was allocated by rproc_alloc
 * @rproc: the remote processor handle
 *
 * This function should _only_ be used if @rproc was only allocated,
 * but not registered yet.
 *
 * If @rproc was already successfully registered (by calling rproc_register()),
 * then use rproc_unregister() instead.
 */
void rproc_free(struct rproc *rproc)
{
	/* the private data was allocated together with the rproc struct */
	kfree(rproc);
}
EXPORT_SYMBOL(rproc_free);
/**
 * rproc_unregister() - unregister a remote processor
 * @rproc: rproc handle to unregister
 *
 * Unregisters a remote processor, and decrements its refcount.
 * If its refcount drops to zero, then @rproc will be freed. If not,
 * it will be freed later once the last reference is dropped.
 *
 * This function should be called when the platform specific rproc
 * implementation decides to remove the rproc device. it should
 * _only_ be called if a previous invocation of rproc_register()
 * has completed successfully.
 *
 * After rproc_unregister() returns, @rproc is _not_ valid anymore and
 * it shouldn't be used. More specifically, don't call rproc_free()
 * or try to directly free @rproc after rproc_unregister() returns;
 * none of these are needed, and calling them is a bug.
 *
 * Returns 0 on success and -EINVAL if @rproc isn't valid.
 */
int rproc_unregister(struct rproc *rproc)
{
	if (!rproc)
		return -EINVAL;

	/* if rproc is just being registered, wait */
	wait_for_completion(&rproc->firmware_loading_complete);

	/* was an rpmsg vdev created ? */
	if (rproc->rvdev)
		rproc_remove_rpmsg_vdev(rproc);

	/* removal from the klist drops the reference the klist held */
	klist_remove(&rproc->node);

	/* drop the registration reference; may free rproc via rproc_release */
	kref_put(&rproc->refcount, rproc_release);

	return 0;
}
EXPORT_SYMBOL(rproc_unregister);
static int __init remoteproc_init(void)
{
	/* debugfs setup is best-effort: its result is deliberately ignored */
	rproc_init_debugfs();

	return 0;
}
module_init(remoteproc_init);
static void __exit remoteproc_exit(void)
{
	/* tear down the debugfs hierarchy created by remoteproc_init() */
	rproc_exit_debugfs();
}
module_exit(remoteproc_exit);
  1337. MODULE_LICENSE("GPL v2");
  1338. MODULE_DESCRIPTION("Generic Remote Processor Framework");