dma-debug.c

/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>

#define HASH_SIZE       1024ULL
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)

enum {
        dma_debug_single,
        dma_debug_page,
        dma_debug_sg,
        dma_debug_coherent,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5

struct dma_debug_entry {
        struct list_head list;
        struct device    *dev;
        int              type;
        phys_addr_t      paddr;
        u64              dev_addr;
        u64              size;
        int              direction;
        int              sg_call_ents;
        int              sg_mapped_ents;
#ifdef CONFIG_STACKTRACE
        struct stack_trace stacktrace;
        unsigned long    st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};

struct hash_bucket {
        struct list_head list;
        spinlock_t lock;
} ____cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;

/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;

/* debugfs dentries for the stuff above */
static struct dentry *dma_debug_dent        __read_mostly;
static struct dentry *global_disable_dent   __read_mostly;
static struct dentry *error_count_dent      __read_mostly;
static struct dentry *show_all_errors_dent  __read_mostly;
static struct dentry *show_num_errors_dent  __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;

static const char *type2name[4] = { "single", "page",
                                    "scatter-gather", "coherent" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
                                   "DMA_FROM_DEVICE", "DMA_NONE" };

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen
 *                is that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    the worst case this will result in one more warning in
 *                    the system log than the user configured. This variable
 *                    is writeable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
        if (entry) {
                printk(KERN_WARNING "Mapped at:\n");
                print_stack_trace(&entry->stacktrace, 0);
        }
#endif
}

#define err_printk(dev, entry, format, arg...) do {                    \
                error_count += 1;                                       \
                if (show_all_errors || show_num_errors > 0) {           \
                        WARN(1, "%s %s: " format,                       \
                             dev_driver_string(dev),                    \
                             dev_name(dev) , ## arg);                   \
                        dump_entry_trace(entry);                        \
                }                                                       \
                if (!show_all_errors && show_num_errors > 0)            \
                        show_num_errors -= 1;                           \
        } while (0);

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
        /*
         * Hash function is based on the dma address.
         * We use bits 13-22 of the dma address (HASH_FN_SHIFT and
         * HASH_FN_MASK) as the index into the hash.
         */
        return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}

/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
                                           unsigned long *flags)
{
        int idx = hash_fn(entry);
        unsigned long __flags;

        spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
        *flags = __flags;
        return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
                            unsigned long *flags)
{
        unsigned long __flags = *flags;

        spin_unlock_irqrestore(&bucket->lock, __flags);
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
                                                struct dma_debug_entry *ref)
{
        struct dma_debug_entry *entry;

        list_for_each_entry(entry, &bucket->list, list) {
                if ((entry->dev_addr == ref->dev_addr) &&
                    (entry->dev == ref->dev))
                        return entry;
        }

        return NULL;
}

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
                            struct dma_debug_entry *entry)
{
        list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
        list_del(&entry->list);
}

/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
        int idx;

        for (idx = 0; idx < HASH_SIZE; idx++) {
                struct hash_bucket *bucket = &dma_entry_hash[idx];
                struct dma_debug_entry *entry;
                unsigned long flags;

                spin_lock_irqsave(&bucket->lock, flags);

                list_for_each_entry(entry, &bucket->list, list) {
                        if (!dev || dev == entry->dev) {
                                dev_info(entry->dev,
                                         "%s idx %d P=%Lx D=%Lx L=%Lx %s\n",
                                         type2name[entry->type], idx,
                                         (unsigned long long)entry->paddr,
                                         entry->dev_addr, entry->size,
                                         dir2name[entry->direction]);
                        }
                }

                spin_unlock_irqrestore(&bucket->lock, flags);
        }
}
EXPORT_SYMBOL(debug_dma_dump_mappings);

/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
        struct hash_bucket *bucket;
        unsigned long flags;

        bucket = get_hash_bucket(entry, &flags);
        hash_bucket_add(bucket, entry);
        put_hash_bucket(bucket, &flags);
}

static struct dma_debug_entry *__dma_entry_alloc(void)
{
        struct dma_debug_entry *entry;

        entry = list_entry(free_entries.next, struct dma_debug_entry, list);
        list_del(&entry->list);
        memset(entry, 0, sizeof(*entry));

        num_free_entries -= 1;
        if (num_free_entries < min_free_entries)
                min_free_entries = num_free_entries;

        return entry;
}

/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
        struct dma_debug_entry *entry = NULL;
        unsigned long flags;

        spin_lock_irqsave(&free_entries_lock, flags);

        if (list_empty(&free_entries)) {
                printk(KERN_ERR "DMA-API: debugging out of memory "
                                "- disabling\n");
                global_disable = true;
                goto out;
        }

        entry = __dma_entry_alloc();

#ifdef CONFIG_STACKTRACE
        entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
        entry->stacktrace.entries = entry->st_entries;
        entry->stacktrace.skip = 2;
        save_stack_trace(&entry->stacktrace);
#endif

out:
        spin_unlock_irqrestore(&free_entries_lock, flags);

        return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
        unsigned long flags;

        /*
         * add to beginning of the list - this way the entries are
         * more likely cache hot when they are reallocated.
         */
        spin_lock_irqsave(&free_entries_lock, flags);
        list_add(&entry->list, &free_entries);
        num_free_entries += 1;
        spin_unlock_irqrestore(&free_entries_lock, flags);
}

int dma_debug_resize_entries(u32 num_entries)
{
        int i, delta, ret = 0;
        unsigned long flags;
        struct dma_debug_entry *entry;
        LIST_HEAD(tmp);

        spin_lock_irqsave(&free_entries_lock, flags);

        if (nr_total_entries < num_entries) {
                delta = num_entries - nr_total_entries;

                spin_unlock_irqrestore(&free_entries_lock, flags);

                for (i = 0; i < delta; i++) {
                        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                        if (!entry)
                                break;

                        list_add_tail(&entry->list, &tmp);
                }

                spin_lock_irqsave(&free_entries_lock, flags);

                list_splice(&tmp, &free_entries);
                nr_total_entries += i;
                num_free_entries += i;
        } else {
                delta = nr_total_entries - num_entries;

                for (i = 0; i < delta && !list_empty(&free_entries); i++) {
                        entry = __dma_entry_alloc();
                        kfree(entry);
                }

                nr_total_entries -= i;
        }

        if (nr_total_entries != num_entries)
                ret = 1;

        spin_unlock_irqrestore(&free_entries_lock, flags);

        return ret;
}
EXPORT_SYMBOL(dma_debug_resize_entries);
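
/*
 * Illustrative sketch (not part of this file): a caller that knows how many
 * mappings it will need - an IOMMU driver, for example - could grow the
 * preallocated pool like this. The function name "my_iommu_init" is made up
 * for the example:
 *
 *      static int my_iommu_init(void)
 *      {
 *              if (dma_debug_resize_entries(new_total) != 0)
 *                      printk(KERN_WARNING
 *                             "my_iommu: could not resize dma-debug pool\n");
 *              ...
 *      }
 *
 * A non-zero return only means the pool could not be brought to exactly
 * new_total entries; dma-debug itself stays enabled either way.
 */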

/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static int prealloc_memory(u32 num_entries)
{
        struct dma_debug_entry *entry, *next_entry;
        int i;

        for (i = 0; i < num_entries; ++i) {
                entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                if (!entry)
                        goto out_err;

                list_add_tail(&entry->list, &free_entries);
        }

        num_free_entries = num_entries;
        min_free_entries = num_entries;

        printk(KERN_INFO "DMA-API: preallocated %d debug entries\n",
               num_entries);

        return 0;

out_err:

        list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
                list_del(&entry->list);
                kfree(entry);
        }

        return -ENOMEM;
}

static int dma_debug_fs_init(void)
{
        dma_debug_dent = debugfs_create_dir("dma-api", NULL);
        if (!dma_debug_dent) {
                printk(KERN_ERR "DMA-API: can not create debugfs directory\n");
                return -ENOMEM;
        }

        global_disable_dent = debugfs_create_bool("disabled", 0444,
                                                  dma_debug_dent,
                                                  (u32 *)&global_disable);
        if (!global_disable_dent)
                goto out_err;

        error_count_dent = debugfs_create_u32("error_count", 0444,
                                              dma_debug_dent, &error_count);
        if (!error_count_dent)
                goto out_err;

        show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
                                                  dma_debug_dent,
                                                  &show_all_errors);
        if (!show_all_errors_dent)
                goto out_err;

        show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
                                                  dma_debug_dent,
                                                  &show_num_errors);
        if (!show_num_errors_dent)
                goto out_err;

        num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
                                                   dma_debug_dent,
                                                   &num_free_entries);
        if (!num_free_entries_dent)
                goto out_err;

        min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
                                                   dma_debug_dent,
                                                   &min_free_entries);
        if (!min_free_entries_dent)
                goto out_err;

        return 0;

out_err:
        debugfs_remove_recursive(dma_debug_dent);

        return -ENOMEM;
}

void dma_debug_add_bus(struct bus_type *bus)
{
        /* FIXME: register notifier */
}

/*
 * Let the architectures decide how many entries should be preallocated.
 */
void dma_debug_init(u32 num_entries)
{
        int i;

        if (global_disable)
                return;

        for (i = 0; i < HASH_SIZE; ++i) {
                INIT_LIST_HEAD(&dma_entry_hash[i].list);
                dma_entry_hash[i].lock = SPIN_LOCK_UNLOCKED;
        }

        if (dma_debug_fs_init() != 0) {
                printk(KERN_ERR "DMA-API: error creating debugfs entries "
                                "- disabling\n");
                global_disable = true;

                return;
        }

        if (req_entries)
                num_entries = req_entries;

        if (prealloc_memory(num_entries) != 0) {
                printk(KERN_ERR "DMA-API: debugging out of memory error "
                                "- disabled\n");
                global_disable = true;

                return;
        }

        nr_total_entries = num_free_entries;

        printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n");
}
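
/*
 * Illustrative sketch (not part of this file): an architecture calls
 * dma_debug_init() once during boot with its preferred pool size, e.g. from
 * its DMA init code. The constant below is an assumption for the example,
 * not something defined here:
 *
 *      #define PREALLOC_DMA_DEBUG_ENTRIES      (1 << 16)
 *
 *      dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
 */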

static __init int dma_debug_cmdline(char *str)
{
        if (!str)
                return -EINVAL;

        if (strncmp(str, "off", 3) == 0) {
                printk(KERN_INFO "DMA-API: debugging disabled on kernel "
                                 "command line\n");
                global_disable = true;
        }

        return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
        int res;

        if (!str)
                return -EINVAL;

        res = get_option(&str, &req_entries);

        if (!res)
                req_entries = 0;

        return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);
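
/*
 * The two handlers above make dma-debug configurable from the kernel command
 * line, e.g.:
 *
 *      dma_debug=off                   disable the checks entirely
 *      dma_debug_entries=<number>      override the preallocation count
 *
 * The value passed to dma_debug_entries= is any number accepted by
 * get_option(); the name <number> above is only a placeholder.
 */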

static void check_unmap(struct dma_debug_entry *ref)
{
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        if (dma_mapping_error(ref->dev, ref->dev_addr)) {
                err_printk(ref->dev, NULL, "DMA-API: device driver tries "
                           "to free an invalid DMA memory address\n");
                return;
        }

        bucket = get_hash_bucket(ref, &flags);
        entry = hash_bucket_find(bucket, ref);

        if (!entry) {
                err_printk(ref->dev, NULL, "DMA-API: device driver tries "
                           "to free DMA memory it has not allocated "
                           "[device address=0x%016llx] [size=%llu bytes]\n",
                           ref->dev_addr, ref->size);
                goto out;
        }

        if (ref->size != entry->size) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different size "
                           "[device address=0x%016llx] [map size=%llu bytes] "
                           "[unmap size=%llu bytes]\n",
                           ref->dev_addr, entry->size, ref->size);
        }

        if (ref->type != entry->type) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with wrong function "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped as %s] [unmapped as %s]\n",
                           ref->dev_addr, ref->size,
                           type2name[entry->type], type2name[ref->type]);
        } else if ((entry->type == dma_debug_coherent) &&
                   (ref->paddr != entry->paddr)) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different CPU address "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[cpu alloc address=%p] [cpu free address=%p]",
                           ref->dev_addr, ref->size,
                           (void *)entry->paddr, (void *)ref->paddr);
        }

        if (ref->sg_call_ents && ref->type == dma_debug_sg &&
            ref->sg_call_ents != entry->sg_call_ents) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA sg list with different entry count "
                           "[map count=%d] [unmap count=%d]\n",
                           entry->sg_call_ents, ref->sg_call_ents);
        }

        /*
         * This may be no bug in reality - but most implementations of the
         * DMA API don't handle this properly, so check for it here
         */
        if (ref->direction != entry->direction) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different direction "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [unmapped with %s]\n",
                           ref->dev_addr, ref->size,
                           dir2name[entry->direction],
                           dir2name[ref->direction]);
        }

        hash_bucket_del(entry);
        dma_entry_free(entry);

out:
        put_hash_bucket(bucket, &flags);
}

static void check_for_stack(struct device *dev, void *addr)
{
        if (object_is_on_stack(addr))
                err_printk(dev, NULL, "DMA-API: device driver maps memory "
                           "from stack [addr=%p]\n", addr);
}

static inline bool overlap(void *addr, u64 size, void *start, void *end)
{
        void *addr2 = (char *)addr + size;

        return ((addr >= start && addr < end) ||
                (addr2 >= start && addr2 < end) ||
                ((addr < start) && (addr2 >= end)));
}

static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
{
        if (overlap(addr, size, _text, _etext) ||
            overlap(addr, size, __start_rodata, __end_rodata))
                err_printk(dev, NULL, "DMA-API: device driver maps "
                           "memory from kernel text or rodata "
                           "[addr=%p] [size=%llu]\n", addr, size);
}

static void check_sync(struct device *dev, dma_addr_t addr,
                       u64 size, u64 offset, int direction, bool to_cpu)
{
        struct dma_debug_entry ref = {
                .dev            = dev,
                .dev_addr       = addr,
                .size           = size,
                .direction      = direction,
        };
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        bucket = get_hash_bucket(&ref, &flags);

        entry = hash_bucket_find(bucket, &ref);

        if (!entry) {
                err_printk(dev, NULL, "DMA-API: device driver tries "
                           "to sync DMA memory it has not allocated "
                           "[device address=0x%016llx] [size=%llu bytes]\n",
                           (unsigned long long)addr, size);
                goto out;
        }

        if ((offset + size) > entry->size) {
                err_printk(dev, entry, "DMA-API: device driver syncs"
                           " DMA memory outside allocated range "
                           "[device address=0x%016llx] "
                           "[allocation size=%llu bytes] [sync offset=%llu] "
                           "[sync size=%llu]\n", entry->dev_addr, entry->size,
                           offset, size);
        }

        if (direction != entry->direction) {
                err_printk(dev, entry, "DMA-API: device driver syncs "
                           "DMA memory with different direction "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [synced with %s]\n",
                           (unsigned long long)addr, entry->size,
                           dir2name[entry->direction],
                           dir2name[direction]);
        }

        if (entry->direction == DMA_BIDIRECTIONAL)
                goto out;

        if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
                      !(direction == DMA_TO_DEVICE))
                err_printk(dev, entry, "DMA-API: device driver syncs "
                           "device read-only DMA memory for cpu "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [synced with %s]\n",
                           (unsigned long long)addr, entry->size,
                           dir2name[entry->direction],
                           dir2name[direction]);

        if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
                       !(direction == DMA_FROM_DEVICE))
                err_printk(dev, entry, "DMA-API: device driver syncs "
                           "device write-only DMA memory to device "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [synced with %s]\n",
                           (unsigned long long)addr, entry->size,
                           dir2name[entry->direction],
                           dir2name[direction]);

out:
        put_hash_bucket(bucket, &flags);
}

void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
                        size_t size, int direction, dma_addr_t dma_addr,
                        bool map_single)
{
        struct dma_debug_entry *entry;

        if (unlikely(global_disable))
                return;

        if (unlikely(dma_mapping_error(dev, dma_addr)))
                return;

        entry = dma_entry_alloc();
        if (!entry)
                return;

        entry->dev       = dev;
        entry->type      = dma_debug_page;
        entry->paddr     = page_to_phys(page) + offset;
        entry->dev_addr  = dma_addr;
        entry->size      = size;
        entry->direction = direction;

        if (map_single)
                entry->type = dma_debug_single;

        if (!PageHighMem(page)) {
                void *addr = ((char *)page_address(page)) + offset;
                check_for_stack(dev, addr);
                check_for_illegal_area(dev, addr, size);
        }

        add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);
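
/*
 * Illustrative sketch (not part of this file): the architecture's DMA
 * mapping wrappers are expected to forward every mapping to the hook above.
 * A dma_map_page()-style wrapper would roughly do the following; the "ops"
 * structure and its map_page member are assumptions for the example:
 *
 *      addr = ops->map_page(dev, page, offset, size, dir, NULL);
 *      debug_dma_map_page(dev, page, offset, size, dir, addr, false);
 *      return addr;
 *
 * map_single is passed as true when the mapping originated from a
 * dma_map_single() call, so that check_unmap() can flag mismatched
 * map/unmap function pairs.
 */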

void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
                          size_t size, int direction, bool map_single)
{
        struct dma_debug_entry ref = {
                .type           = dma_debug_page,
                .dev            = dev,
                .dev_addr       = addr,
                .size           = size,
                .direction      = direction,
        };

        if (unlikely(global_disable))
                return;

        if (map_single)
                ref.type = dma_debug_single;

        check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);

void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
                      int nents, int mapped_ents, int direction)
{
        struct dma_debug_entry *entry;
        struct scatterlist *s;
        int i;

        if (unlikely(global_disable))
                return;

        for_each_sg(sg, s, mapped_ents, i) {
                entry = dma_entry_alloc();
                if (!entry)
                        return;

                entry->type           = dma_debug_sg;
                entry->dev            = dev;
                entry->paddr          = sg_phys(s);
                entry->size           = s->length;
                entry->dev_addr       = s->dma_address;
                entry->direction      = direction;
                entry->sg_call_ents   = nents;
                entry->sg_mapped_ents = mapped_ents;

                if (!PageHighMem(sg_page(s))) {
                        check_for_stack(dev, sg_virt(s));
                        check_for_illegal_area(dev, sg_virt(s), s->length);
                }

                add_dma_entry(entry);
        }
}
EXPORT_SYMBOL(debug_dma_map_sg);

void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
                        int nelems, int dir)
{
        struct dma_debug_entry *entry;
        struct scatterlist *s;
        int mapped_ents = 0, i;
        unsigned long flags;

        if (unlikely(global_disable))
                return;

        for_each_sg(sglist, s, nelems, i) {

                struct dma_debug_entry ref = {
                        .type           = dma_debug_sg,
                        .dev            = dev,
                        .paddr          = sg_phys(s),
                        .dev_addr       = s->dma_address,
                        .size           = s->length,
                        .direction      = dir,
                        .sg_call_ents   = 0,
                };

                if (mapped_ents && i >= mapped_ents)
                        break;

                if (mapped_ents == 0) {
                        struct hash_bucket *bucket;
                        ref.sg_call_ents = nelems;
                        bucket = get_hash_bucket(&ref, &flags);
                        entry = hash_bucket_find(bucket, &ref);
                        if (entry)
                                mapped_ents = entry->sg_mapped_ents;
                        put_hash_bucket(bucket, &flags);
                }

                check_unmap(&ref);
        }
}
EXPORT_SYMBOL(debug_dma_unmap_sg);

void debug_dma_alloc_coherent(struct device *dev, size_t size,
                              dma_addr_t dma_addr, void *virt)
{
        struct dma_debug_entry *entry;

        if (unlikely(global_disable))
                return;

        if (unlikely(virt == NULL))
                return;

        entry = dma_entry_alloc();
        if (!entry)
                return;

        entry->type      = dma_debug_coherent;
        entry->dev       = dev;
        entry->paddr     = virt_to_phys(virt);
        entry->size      = size;
        entry->dev_addr  = dma_addr;
        entry->direction = DMA_BIDIRECTIONAL;

        add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_alloc_coherent);

void debug_dma_free_coherent(struct device *dev, size_t size,
                             void *virt, dma_addr_t addr)
{
        struct dma_debug_entry ref = {
                .type           = dma_debug_coherent,
                .dev            = dev,
                .paddr          = virt_to_phys(virt),
                .dev_addr       = addr,
                .size           = size,
                .direction      = DMA_BIDIRECTIONAL,
        };

        if (unlikely(global_disable))
                return;

        check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_free_coherent);

void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                                   size_t size, int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, 0, direction, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
                                      dma_addr_t dma_handle, size_t size,
                                      int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, 0, direction, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);

void debug_dma_sync_single_range_for_cpu(struct device *dev,
                                         dma_addr_t dma_handle,
                                         unsigned long offset, size_t size,
                                         int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, offset, direction, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);

void debug_dma_sync_single_range_for_device(struct device *dev,
                                            dma_addr_t dma_handle,
                                            unsigned long offset,
                                            size_t size, int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, offset, direction, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);

void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                               int nelems, int direction)
{
        struct scatterlist *s;
        int i;

        if (unlikely(global_disable))
                return;

        for_each_sg(sg, s, nelems, i) {
                check_sync(dev, s->dma_address, s->dma_length, 0,
                           direction, true);
        }
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                                  int nelems, int direction)
{
        struct scatterlist *s;
        int i;

        if (unlikely(global_disable))
                return;

        for_each_sg(sg, s, nelems, i) {
                check_sync(dev, s->dma_address, s->dma_length, 0,
                           direction, false);
        }
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);