dma-debug.c

/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>

#define HASH_SIZE       1024ULL
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)

enum {
	dma_debug_single,
	dma_debug_page,
	dma_debug_sg,
	dma_debug_coherent,
};

struct dma_debug_entry {
	struct list_head list;
	struct device    *dev;
	int              type;
	phys_addr_t      paddr;
	u64              dev_addr;
	u64              size;
	int              direction;
	int              sg_call_ents;
	int              sg_mapped_ents;
};

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];

/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;

/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;

/* debugfs dentry's for the stuff above */
static struct dentry *dma_debug_dent        __read_mostly;
static struct dentry *global_disable_dent   __read_mostly;
static struct dentry *error_count_dent      __read_mostly;
static struct dentry *show_all_errors_dent  __read_mostly;
static struct dentry *show_num_errors_dent  __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;

static const char *type2name[4] = { "single", "page",
				    "scatter-gather", "coherent" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
				   "DMA_FROM_DEVICE", "DMA_NONE" };

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writeable via debugfs.
 */
#define err_printk(dev, format, arg...) do {			\
		error_count += 1;				\
		if (show_all_errors || show_num_errors > 0) {	\
			WARN(1, "%s %s: " format,		\
			     dev_driver_string(dev),		\
			     dev_name(dev) , ## arg);		\
		}						\
		if (!show_all_errors && show_num_errors > 0)	\
			show_num_errors -= 1;			\
	} while (0)

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
	/*
	 * Hash function is based on the dma address.
	 * We use bits 13-22 (HASH_FN_SHIFT up to the 10-bit HASH_FN_MASK)
	 * as the index into the hash.
	 */
	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}

/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
{
	int idx = hash_fn(entry);
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;
	return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
			    unsigned long *flags)
{
	unsigned long __flags = *flags;

	spin_unlock_irqrestore(&bucket->lock, __flags);
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
						struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;

	list_for_each_entry(entry, &bucket->list, list) {
		if ((entry->dev_addr == ref->dev_addr) &&
		    (entry->dev == ref->dev))
			return entry;
	}

	return NULL;
}

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
			    struct dma_debug_entry *entry)
{
	list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
	list_del(&entry->list);
}

/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, &flags);
}

/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
	struct dma_debug_entry *entry = NULL;
	unsigned long flags;

	spin_lock_irqsave(&free_entries_lock, flags);

	if (list_empty(&free_entries)) {
		printk(KERN_ERR "DMA-API: debugging out of memory "
				"- disabling\n");
		global_disable = true;
		goto out;
	}

	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
	list_del(&entry->list);
	memset(entry, 0, sizeof(*entry));

	num_free_entries -= 1;
	if (num_free_entries < min_free_entries)
		min_free_entries = num_free_entries;

out:
	spin_unlock_irqrestore(&free_entries_lock, flags);

	return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
	unsigned long flags;

	/*
	 * add to beginning of the list - this way the entries are
	 * more likely cache hot when they are reallocated.
	 */
	spin_lock_irqsave(&free_entries_lock, flags);
	list_add(&entry->list, &free_entries);
	num_free_entries += 1;
	spin_unlock_irqrestore(&free_entries_lock, flags);
}

/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */
static int prealloc_memory(u32 num_entries)
{
	struct dma_debug_entry *entry, *next_entry;
	int i;

	for (i = 0; i < num_entries; ++i) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			goto out_err;

		list_add_tail(&entry->list, &free_entries);
	}

	num_free_entries = num_entries;
	min_free_entries = num_entries;

	printk(KERN_INFO "DMA-API: preallocated %d debug entries\n",
			num_entries);

	return 0;

out_err:

	list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	return -ENOMEM;
}
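
/*
 * Create the debugfs directory dma-api/ and the files that expose the
 * statistics and knobs declared above (disabled, error_count, all_errors,
 * num_errors, num_free_entries, min_free_entries). On any failure the
 * whole directory is removed again and -ENOMEM is returned.
 */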
static int dma_debug_fs_init(void)
{
	dma_debug_dent = debugfs_create_dir("dma-api", NULL);
	if (!dma_debug_dent) {
		printk(KERN_ERR "DMA-API: can not create debugfs directory\n");
		return -ENOMEM;
	}

	global_disable_dent = debugfs_create_bool("disabled", 0444,
			dma_debug_dent,
			(u32 *)&global_disable);
	if (!global_disable_dent)
		goto out_err;

	error_count_dent = debugfs_create_u32("error_count", 0444,
			dma_debug_dent, &error_count);
	if (!error_count_dent)
		goto out_err;

	show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
			dma_debug_dent,
			&show_all_errors);
	if (!show_all_errors_dent)
		goto out_err;

	show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
			dma_debug_dent,
			&show_num_errors);
	if (!show_num_errors_dent)
		goto out_err;

	num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
			dma_debug_dent,
			&num_free_entries);
	if (!num_free_entries_dent)
		goto out_err;

	min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
			dma_debug_dent,
			&min_free_entries);
	if (!min_free_entries_dent)
		goto out_err;

	return 0;

out_err:
	debugfs_remove_recursive(dma_debug_dent);

	return -ENOMEM;
}

/*
 * Let the architectures decide how many entries should be preallocated.
 */
void dma_debug_init(u32 num_entries)
{
	int i;

	if (global_disable)
		return;

	for (i = 0; i < HASH_SIZE; ++i) {
		INIT_LIST_HEAD(&dma_entry_hash[i].list);
		spin_lock_init(&dma_entry_hash[i].lock);
	}

	if (dma_debug_fs_init() != 0) {
		printk(KERN_ERR "DMA-API: error creating debugfs entries "
				"- disabling\n");
		global_disable = true;

		return;
	}

	if (req_entries)
		num_entries = req_entries;

	if (prealloc_memory(num_entries) != 0) {
		printk(KERN_ERR "DMA-API: debugging out of memory error "
				"- disabled\n");
		global_disable = true;

		return;
	}

	printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n");
}
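
/*
 * Kernel command line handling:
 *   dma_debug=off          disables the checks completely
 *   dma_debug_entries=<n>  overrides the number of preallocated entries
 */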
static __init int dma_debug_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (strncmp(str, "off", 3) == 0) {
		printk(KERN_INFO "DMA-API: debugging disabled on kernel "
				 "command line\n");
		global_disable = true;
	}

	return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
	int res;

	if (!str)
		return -EINVAL;

	res = get_option(&str, &req_entries);

	if (!res)
		req_entries = 0;

	return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);
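
/*
 * Cross-check an unmap request against the entry recorded at map time:
 * the mapping must exist, and size, mapping type, CPU address (for
 * coherent allocations), scatterlist entry count and DMA direction all
 * have to match. On success the entry is removed from the hash and
 * returned to the free list.
 */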
static void check_unmap(struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	if (dma_mapping_error(ref->dev, ref->dev_addr))
		return;

	bucket = get_hash_bucket(ref, &flags);
	entry = hash_bucket_find(bucket, ref);

	if (!entry) {
		err_printk(ref->dev, "DMA-API: device driver tries "
			   "to free DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   ref->dev_addr, ref->size);
		goto out;
	}

	if (ref->size != entry->size) {
		err_printk(ref->dev, "DMA-API: device driver frees "
			   "DMA memory with different size "
			   "[device address=0x%016llx] [map size=%llu bytes] "
			   "[unmap size=%llu bytes]\n",
			   ref->dev_addr, entry->size, ref->size);
	}

	if (ref->type != entry->type) {
		err_printk(ref->dev, "DMA-API: device driver frees "
			   "DMA memory with wrong function "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s] [unmapped as %s]\n",
			   ref->dev_addr, ref->size,
			   type2name[entry->type], type2name[ref->type]);
	} else if ((entry->type == dma_debug_coherent) &&
		   (ref->paddr != entry->paddr)) {
		err_printk(ref->dev, "DMA-API: device driver frees "
			   "DMA memory with different CPU address "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[cpu alloc address=%p] [cpu free address=%p]",
			   ref->dev_addr, ref->size,
			   (void *)entry->paddr, (void *)ref->paddr);
	}

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, "DMA-API: device driver frees "
			   "DMA sg list with different entry count "
			   "[map count=%d] [unmap count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

	/*
	 * This may be no bug in reality - but most implementations of the
	 * DMA API don't handle this properly, so check for it here
	 */
	if (ref->direction != entry->direction) {
		err_printk(ref->dev, "DMA-API: device driver frees "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [unmapped with %s]\n",
			   ref->dev_addr, ref->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	hash_bucket_del(entry);
	dma_entry_free(entry);

out:
	put_hash_bucket(bucket, &flags);
}
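
/*
 * Mapping memory that lives on the kernel stack for DMA is almost always
 * a bug; warn if the virtual address is located there.
 */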
static void check_for_stack(struct device *dev, void *addr)
{
	if (object_is_on_stack(addr))
		err_printk(dev, "DMA-API: device driver maps memory from stack"
			   " [addr=%p]\n", addr);
}
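
/*
 * Validate a dma_sync_*() call: the region must have been mapped before,
 * offset + size must stay inside the mapped range, and the direction of
 * the sync has to be consistent with the direction the memory was mapped
 * with (to_cpu distinguishes sync_*_for_cpu from sync_*_for_device).
 */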
static void check_sync(struct device *dev, dma_addr_t addr,
		       u64 size, u64 offset, int direction, bool to_cpu)
{
	struct dma_debug_entry ref = {
		.dev            = dev,
		.dev_addr       = addr,
		.size           = size,
		.direction      = direction,
	};
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(&ref, &flags);

	entry = hash_bucket_find(bucket, &ref);

	if (!entry) {
		err_printk(dev, "DMA-API: device driver tries "
			   "to sync DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   addr, size);
		goto out;
	}

	if ((offset + size) > entry->size) {
		err_printk(dev, "DMA-API: device driver syncs"
			   " DMA memory outside allocated range "
			   "[device address=0x%016llx] "
			   "[allocation size=%llu bytes] [sync offset=%llu] "
			   "[sync size=%llu]\n", entry->dev_addr, entry->size,
			   offset, size);
	}

	if (direction != entry->direction) {
		err_printk(dev, "DMA-API: device driver syncs "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[direction]);
	}

	if (entry->direction == DMA_BIDIRECTIONAL)
		goto out;

	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
		      !(direction == DMA_TO_DEVICE))
		err_printk(dev, "DMA-API: device driver syncs "
			   "device read-only DMA memory for cpu "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[direction]);

	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
		       !(direction == DMA_FROM_DEVICE))
		err_printk(dev, "DMA-API: device driver syncs "
			   "device write-only DMA memory to device "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[direction]);

out:
	put_hash_bucket(bucket, &flags);
}
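
/*
 * Record a dma_map_page()/dma_map_single() mapping in the hash. For
 * single mappings the virtual address is additionally checked against
 * the kernel stack.
 */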
void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
			size_t size, int direction, dma_addr_t dma_addr,
			bool map_single)
{
	struct dma_debug_entry *entry;

	if (unlikely(global_disable))
		return;

	if (unlikely(dma_mapping_error(dev, dma_addr)))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->dev       = dev;
	entry->type      = dma_debug_page;
	entry->paddr     = page_to_phys(page) + offset;
	entry->dev_addr  = dma_addr;
	entry->size      = size;
	entry->direction = direction;

	if (map_single) {
		entry->type = dma_debug_single;
		check_for_stack(dev, page_address(page) + offset);
	}

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);
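
/*
 * Check a dma_unmap_page()/dma_unmap_single() call against the recorded
 * mapping and release the debug entry.
 */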
void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
			  size_t size, int direction, bool map_single)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_page,
		.dev            = dev,
		.dev_addr       = addr,
		.size           = size,
		.direction      = direction,
	};

	if (unlikely(global_disable))
		return;

	if (map_single)
		ref.type = dma_debug_single;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);
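
/*
 * Record one debug entry per mapped scatterlist element. Both the number
 * of elements passed to dma_map_sg() (nents) and the number actually
 * mapped (mapped_ents) are stored so the unmap path can cross-check them.
 */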
void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
		      int nents, int mapped_ents, int direction)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sg, s, mapped_ents, i) {
		entry = dma_entry_alloc();
		if (!entry)
			return;

		entry->type           = dma_debug_sg;
		entry->dev            = dev;
		entry->paddr          = sg_phys(s);
		entry->size           = s->length;
		entry->dev_addr       = s->dma_address;
		entry->direction      = direction;
		entry->sg_call_ents   = nents;
		entry->sg_mapped_ents = mapped_ents;

		check_for_stack(dev, sg_virt(s));

		add_dma_entry(entry);
	}
}
EXPORT_SYMBOL(debug_dma_map_sg);
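
/*
 * Check a dma_unmap_sg() call. The first matching entry tells us how many
 * elements were really mapped; only that many elements are unmapped and
 * cross-checked via check_unmap().
 */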
void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			int nelems, int dir)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int mapped_ents = 0, i;
	unsigned long flags;

	if (unlikely(global_disable))
		return;

	for_each_sg(sglist, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.paddr          = sg_phys(s),
			.dev_addr       = s->dma_address,
			.size           = s->length,
			.direction      = dir,
			.sg_call_ents   = 0,
		};

		if (mapped_ents && i >= mapped_ents)
			break;

		if (mapped_ents == 0) {
			struct hash_bucket *bucket;
			ref.sg_call_ents = nelems;
			bucket = get_hash_bucket(&ref, &flags);
			entry = hash_bucket_find(bucket, &ref);
			if (entry)
				mapped_ents = entry->sg_mapped_ents;
			put_hash_bucket(bucket, &flags);
		}

		check_unmap(&ref);
	}
}
EXPORT_SYMBOL(debug_dma_unmap_sg);
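
/*
 * Record a dma_alloc_coherent() allocation. Coherent memory is always
 * tracked as DMA_BIDIRECTIONAL.
 */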
void debug_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t dma_addr, void *virt)
{
	struct dma_debug_entry *entry;

	if (unlikely(global_disable))
		return;

	if (unlikely(virt == NULL))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type      = dma_debug_coherent;
	entry->dev       = dev;
	entry->paddr     = virt_to_phys(virt);
	entry->size      = size;
	entry->dev_addr  = dma_addr;
	entry->direction = DMA_BIDIRECTIONAL;

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_alloc_coherent);
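
/*
 * Check a dma_free_coherent() call; check_unmap() also verifies that the
 * CPU address matches the one recorded at allocation time.
 */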
void debug_dma_free_coherent(struct device *dev, size_t size,
			     void *virt, dma_addr_t addr)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_coherent,
		.dev            = dev,
		.paddr          = virt_to_phys(virt),
		.dev_addr       = addr,
		.size           = size,
		.direction      = DMA_BIDIRECTIONAL,
	};

	if (unlikely(global_disable))
		return;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_free_coherent);
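
/*
 * The dma_sync_single_*() and dma_sync_single_range_*() hooks all boil
 * down to check_sync(); they differ only in the offset they pass and in
 * whether the sync is done for the CPU or for the device.
 */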
void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	if (unlikely(global_disable))
		return;

	check_sync(dev, dma_handle, size, 0, direction, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
				      dma_addr_t dma_handle, size_t size,
				      int direction)
{
	if (unlikely(global_disable))
		return;

	check_sync(dev, dma_handle, size, 0, direction, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);

void debug_dma_sync_single_range_for_cpu(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 int direction)
{
	if (unlikely(global_disable))
		return;

	check_sync(dev, dma_handle, size, offset, direction, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);

void debug_dma_sync_single_range_for_device(struct device *dev,
					    dma_addr_t dma_handle,
					    unsigned long offset,
					    size_t size, int direction)
{
	if (unlikely(global_disable))
		return;

	check_sync(dev, dma_handle, size, offset, direction, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
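
/*
 * The scatterlist variants run check_sync() on every element, using the
 * element's dma_length as the sync size.
 */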
void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{
	struct scatterlist *s;
	int i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sg, s, nelems, i) {
		check_sync(dev, s->dma_address, s->dma_length, 0,
			   direction, true);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				  int nelems, int direction)
{
	struct scatterlist *s;
	int i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sg, s, nelems, i) {
		check_sync(dev, s->dma_address, s->dma_length, 0,
			   direction, false);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);