dma-debug.c

/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>

#define HASH_SIZE       1024ULL
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)

enum {
        dma_debug_single,
        dma_debug_page,
        dma_debug_sg,
        dma_debug_coherent,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5

struct dma_debug_entry {
        struct list_head list;
        struct device    *dev;
        int              type;
        phys_addr_t      paddr;
        u64              dev_addr;
        u64              size;
        int              direction;
        int              sg_call_ents;
        int              sg_mapped_ents;
#ifdef CONFIG_STACKTRACE
        struct stack_trace stacktrace;
        unsigned long    st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};

struct hash_bucket {
        struct list_head list;
        spinlock_t lock;
} ____cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;

/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;

/* debugfs dentry's for the stuff above */
static struct dentry *dma_debug_dent        __read_mostly;
static struct dentry *global_disable_dent   __read_mostly;
static struct dentry *error_count_dent      __read_mostly;
static struct dentry *show_all_errors_dent  __read_mostly;
static struct dentry *show_num_errors_dent  __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;

/* per-driver filter related state */

#define NAME_MAX_LEN 64

static char current_driver_name[NAME_MAX_LEN] __read_mostly;
static struct device_driver *current_driver   __read_mostly;

static DEFINE_RWLOCK(driver_name_lock);

static const char *type2name[4] = { "single", "page",
                                    "scatter-gather", "coherent" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
                                   "DMA_FROM_DEVICE", "DMA_NONE" };

/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writeable via debugfs.
 */

static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
        if (entry) {
                printk(KERN_WARNING "Mapped at:\n");
                print_stack_trace(&entry->stacktrace, 0);
        }
#endif
}

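/*
 * Decide whether an error for @dev should be reported. With no filter set
 * every device passes; once a driver name has been configured, only devices
 * bound to that driver match. The first match caches the driver pointer in
 * current_driver so later calls avoid the name comparison.
 */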
static bool driver_filter(struct device *dev)
{
        /* driver filter off */
        if (likely(!current_driver_name[0]))
                return true;

        /* driver filter on and initialized */
        if (current_driver && dev->driver == current_driver)
                return true;

        /* driver filter on but not yet initialized */
        if (!current_driver && current_driver_name[0]) {
                struct device_driver *drv = get_driver(dev->driver);
                unsigned long flags;
                bool ret = false;

                if (!drv)
                        return false;

                /* lock to protect against change of current_driver_name */
                read_lock_irqsave(&driver_name_lock, flags);
                if (drv->name &&
                    strncmp(current_driver_name, drv->name, 63) == 0) {
                        current_driver = drv;
                        ret = true;
                }
                read_unlock_irqrestore(&driver_name_lock, flags);
                put_driver(drv);

                return ret;
        }

        return false;
}

#define err_printk(dev, entry, format, arg...) do {                    \
                error_count += 1;                                       \
                if (driver_filter(dev) &&                               \
                    (show_all_errors || show_num_errors > 0)) {         \
                        WARN(1, "%s %s: " format,                       \
                             dev_driver_string(dev),                    \
                             dev_name(dev), ## arg);                    \
                        dump_entry_trace(entry);                        \
                }                                                       \
                if (!show_all_errors && show_num_errors > 0)            \
                        show_num_errors -= 1;                           \
        } while (0)

/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
        /*
         * Hash function is based on the dma address.
         * We use bits 13-22 here as the index into the hash.
         */
        return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}

/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
                                           unsigned long *flags)
{
        int idx = hash_fn(entry);
        unsigned long __flags;

        spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
        *flags = __flags;
        return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
                            unsigned long *flags)
{
        unsigned long __flags = *flags;

        spin_unlock_irqrestore(&bucket->lock, __flags);
}

/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
                                                struct dma_debug_entry *ref)
{
        struct dma_debug_entry *entry;

        list_for_each_entry(entry, &bucket->list, list) {
                if ((entry->dev_addr == ref->dev_addr) &&
                    (entry->dev == ref->dev))
                        return entry;
        }

        return NULL;
}

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
                            struct dma_debug_entry *entry)
{
        list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
        list_del(&entry->list);
}

/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
        int idx;

        for (idx = 0; idx < HASH_SIZE; idx++) {
                struct hash_bucket *bucket = &dma_entry_hash[idx];
                struct dma_debug_entry *entry;
                unsigned long flags;

                spin_lock_irqsave(&bucket->lock, flags);

                list_for_each_entry(entry, &bucket->list, list) {
                        if (!dev || dev == entry->dev) {
                                dev_info(entry->dev,
                                         "%s idx %d P=%Lx D=%Lx L=%Lx %s\n",
                                         type2name[entry->type], idx,
                                         (unsigned long long)entry->paddr,
                                         entry->dev_addr, entry->size,
                                         dir2name[entry->direction]);
                        }
                }

                spin_unlock_irqrestore(&bucket->lock, flags);
        }
}
EXPORT_SYMBOL(debug_dma_dump_mappings);

/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
        struct hash_bucket *bucket;
        unsigned long flags;

        bucket = get_hash_bucket(entry, &flags);
        hash_bucket_add(bucket, entry);
        put_hash_bucket(bucket, &flags);
}

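/*
 * Take one entry off the free list. Must be called with free_entries_lock
 * held and with the list known to be non-empty; the callers check both.
 */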
static struct dma_debug_entry *__dma_entry_alloc(void)
{
        struct dma_debug_entry *entry;

        entry = list_entry(free_entries.next, struct dma_debug_entry, list);
        list_del(&entry->list);
        memset(entry, 0, sizeof(*entry));

        num_free_entries -= 1;
        if (num_free_entries < min_free_entries)
                min_free_entries = num_free_entries;

        return entry;
}

/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
        struct dma_debug_entry *entry = NULL;
        unsigned long flags;

        spin_lock_irqsave(&free_entries_lock, flags);

        if (list_empty(&free_entries)) {
                printk(KERN_ERR "DMA-API: debugging out of memory "
                                "- disabling\n");
                global_disable = true;
                goto out;
        }

        entry = __dma_entry_alloc();

#ifdef CONFIG_STACKTRACE
        entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
        entry->stacktrace.entries = entry->st_entries;
        entry->stacktrace.skip = 2;
        save_stack_trace(&entry->stacktrace);
#endif

out:
        spin_unlock_irqrestore(&free_entries_lock, flags);

        return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
        unsigned long flags;

        /*
         * add to beginning of the list - this way the entries are
         * more likely cache hot when they are reallocated.
         */
        spin_lock_irqsave(&free_entries_lock, flags);
        list_add(&entry->list, &free_entries);
        num_free_entries += 1;
        spin_unlock_irqrestore(&free_entries_lock, flags);
}

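/*
 * Grow or shrink the pool of preallocated entries at runtime. Growing
 * allocates the additional entries outside of free_entries_lock and splices
 * them in afterwards; shrinking frees entries only as long as unused ones
 * remain. Returns 0 if the pool now holds exactly num_entries entries,
 * 1 otherwise.
 */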
int dma_debug_resize_entries(u32 num_entries)
{
        int i, delta, ret = 0;
        unsigned long flags;
        struct dma_debug_entry *entry;
        LIST_HEAD(tmp);

        spin_lock_irqsave(&free_entries_lock, flags);

        if (nr_total_entries < num_entries) {
                delta = num_entries - nr_total_entries;

                spin_unlock_irqrestore(&free_entries_lock, flags);

                for (i = 0; i < delta; i++) {
                        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                        if (!entry)
                                break;

                        list_add_tail(&entry->list, &tmp);
                }

                spin_lock_irqsave(&free_entries_lock, flags);

                list_splice(&tmp, &free_entries);
                nr_total_entries += i;
                num_free_entries += i;
        } else {
                delta = nr_total_entries - num_entries;

                for (i = 0; i < delta && !list_empty(&free_entries); i++) {
                        entry = __dma_entry_alloc();
                        kfree(entry);
                }

                nr_total_entries -= i;
        }

        if (nr_total_entries != num_entries)
                ret = 1;

        spin_unlock_irqrestore(&free_entries_lock, flags);

        return ret;
}
EXPORT_SYMBOL(dma_debug_resize_entries);

/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */
static int prealloc_memory(u32 num_entries)
{
        struct dma_debug_entry *entry, *next_entry;
        int i;

        for (i = 0; i < num_entries; ++i) {
                entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                if (!entry)
                        goto out_err;

                list_add_tail(&entry->list, &free_entries);
        }

        num_free_entries = num_entries;
        min_free_entries = num_entries;

        printk(KERN_INFO "DMA-API: preallocated %d debug entries\n",
               num_entries);

        return 0;

out_err:

        list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
                list_del(&entry->list);
                kfree(entry);
        }

        return -ENOMEM;
}

static int dma_debug_fs_init(void)
{
        dma_debug_dent = debugfs_create_dir("dma-api", NULL);
        if (!dma_debug_dent) {
                printk(KERN_ERR "DMA-API: can not create debugfs directory\n");
                return -ENOMEM;
        }

        global_disable_dent = debugfs_create_bool("disabled", 0444,
                                                  dma_debug_dent,
                                                  (u32 *)&global_disable);
        if (!global_disable_dent)
                goto out_err;

        error_count_dent = debugfs_create_u32("error_count", 0444,
                                              dma_debug_dent, &error_count);
        if (!error_count_dent)
                goto out_err;

        show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
                                                  dma_debug_dent,
                                                  &show_all_errors);
        if (!show_all_errors_dent)
                goto out_err;

        show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
                                                  dma_debug_dent,
                                                  &show_num_errors);
        if (!show_num_errors_dent)
                goto out_err;

        num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
                                                   dma_debug_dent,
                                                   &num_free_entries);
        if (!num_free_entries_dent)
                goto out_err;

        min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
                                                   dma_debug_dent,
                                                   &min_free_entries);
        if (!min_free_entries_dent)
                goto out_err;

        return 0;

out_err:
        debugfs_remove_recursive(dma_debug_dent);

        return -ENOMEM;
}

void dma_debug_add_bus(struct bus_type *bus)
{
        /* FIXME: register notifier */
}

/*
 * Let the architectures decide how many entries should be preallocated.
 */
void dma_debug_init(u32 num_entries)
{
        int i;

        if (global_disable)
                return;

        for (i = 0; i < HASH_SIZE; ++i) {
                INIT_LIST_HEAD(&dma_entry_hash[i].list);
                spin_lock_init(&dma_entry_hash[i].lock);
        }

        if (dma_debug_fs_init() != 0) {
                printk(KERN_ERR "DMA-API: error creating debugfs entries "
                                "- disabling\n");
                global_disable = true;

                return;
        }

        if (req_entries)
                num_entries = req_entries;

        if (prealloc_memory(num_entries) != 0) {
                printk(KERN_ERR "DMA-API: debugging out of memory error "
                                "- disabled\n");
                global_disable = true;

                return;
        }

        nr_total_entries = num_free_entries;

        printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n");
}

static __init int dma_debug_cmdline(char *str)
{
        if (!str)
                return -EINVAL;

        if (strncmp(str, "off", 3) == 0) {
                printk(KERN_INFO "DMA-API: debugging disabled on kernel "
                                 "command line\n");
                global_disable = true;
        }

        return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
        int res;

        if (!str)
                return -EINVAL;

        res = get_option(&str, &req_entries);
        if (!res)
                req_entries = 0;

        return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);

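/*
 * Validate an unmap/free request against the entry recorded at map time:
 * the mapping must exist, and size, type, CPU address, scatterlist entry
 * count and direction must match what was mapped. If the mapping is found
 * it is removed from the hash and returned to the free list, even when one
 * of the checks triggered a warning.
 */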
static void check_unmap(struct dma_debug_entry *ref)
{
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        if (dma_mapping_error(ref->dev, ref->dev_addr)) {
                err_printk(ref->dev, NULL, "DMA-API: device driver tries "
                           "to free an invalid DMA memory address\n");
                return;
        }

        bucket = get_hash_bucket(ref, &flags);
        entry = hash_bucket_find(bucket, ref);

        if (!entry) {
                err_printk(ref->dev, NULL, "DMA-API: device driver tries "
                           "to free DMA memory it has not allocated "
                           "[device address=0x%016llx] [size=%llu bytes]\n",
                           ref->dev_addr, ref->size);
                goto out;
        }

        if (ref->size != entry->size) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different size "
                           "[device address=0x%016llx] [map size=%llu bytes] "
                           "[unmap size=%llu bytes]\n",
                           ref->dev_addr, entry->size, ref->size);
        }

        if (ref->type != entry->type) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with wrong function "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped as %s] [unmapped as %s]\n",
                           ref->dev_addr, ref->size,
                           type2name[entry->type], type2name[ref->type]);
        } else if ((entry->type == dma_debug_coherent) &&
                   (ref->paddr != entry->paddr)) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different CPU address "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[cpu alloc address=%p] [cpu free address=%p]\n",
                           ref->dev_addr, ref->size,
                           (void *)entry->paddr, (void *)ref->paddr);
        }

        if (ref->sg_call_ents && ref->type == dma_debug_sg &&
            ref->sg_call_ents != entry->sg_call_ents) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA sg list with different entry count "
                           "[map count=%d] [unmap count=%d]\n",
                           entry->sg_call_ents, ref->sg_call_ents);
        }

        /*
         * This may be no bug in reality - but most implementations of the
         * DMA API don't handle this properly, so check for it here
         */
        if (ref->direction != entry->direction) {
                err_printk(ref->dev, entry, "DMA-API: device driver frees "
                           "DMA memory with different direction "
                           "[device address=0x%016llx] [size=%llu bytes] "
                           "[mapped with %s] [unmapped with %s]\n",
                           ref->dev_addr, ref->size,
                           dir2name[entry->direction],
                           dir2name[ref->direction]);
        }

        hash_bucket_del(entry);
        dma_entry_free(entry);

out:
        put_hash_bucket(bucket, &flags);
}

static void check_for_stack(struct device *dev, void *addr)
{
        if (object_is_on_stack(addr))
                err_printk(dev, NULL, "DMA-API: device driver maps memory from "
                                "stack [addr=%p]\n", addr);
}

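/*
 * Report mappings that cover kernel text or rodata. overlap() checks whether
 * the range [addr, addr + size) intersects the [start, end) section.
 */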
static inline bool overlap(void *addr, u64 size, void *start, void *end)
{
        void *addr2 = (char *)addr + size;

        return ((addr >= start && addr < end) ||
                (addr2 >= start && addr2 < end) ||
                ((addr < start) && (addr2 >= end)));
}

static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
{
        if (overlap(addr, size, _text, _etext) ||
            overlap(addr, size, __start_rodata, __end_rodata))
                err_printk(dev, NULL, "DMA-API: device driver maps "
                                "memory from kernel text or rodata "
                                "[addr=%p] [size=%llu]\n", addr, size);
}

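/*
 * Check a sync_*_for_cpu/for_device call against the recorded mapping: the
 * region must exist, the synced range (offset + size) must lie inside the
 * mapped allocation, and the sync direction must be compatible with the
 * direction the memory was mapped with. @to_cpu is true for the *_for_cpu
 * variants.
 */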
static void check_sync(struct device *dev, dma_addr_t addr,
                       u64 size, u64 offset, int direction, bool to_cpu)
{
        struct dma_debug_entry ref = {
                .dev            = dev,
                .dev_addr       = addr,
                .size           = size,
                .direction      = direction,
        };
        struct dma_debug_entry *entry;
        struct hash_bucket *bucket;
        unsigned long flags;

        bucket = get_hash_bucket(&ref, &flags);

        entry = hash_bucket_find(bucket, &ref);

        if (!entry) {
                err_printk(dev, NULL, "DMA-API: device driver tries "
                           "to sync DMA memory it has not allocated "
                           "[device address=0x%016llx] [size=%llu bytes]\n",
                           (unsigned long long)addr, size);
                goto out;
        }

        if ((offset + size) > entry->size) {
                err_printk(dev, entry, "DMA-API: device driver syncs"
                                " DMA memory outside allocated range "
                                "[device address=0x%016llx] "
                                "[allocation size=%llu bytes] [sync offset=%llu] "
                                "[sync size=%llu]\n", entry->dev_addr, entry->size,
                                offset, size);
        }

        if (direction != entry->direction) {
                err_printk(dev, entry, "DMA-API: device driver syncs "
                                "DMA memory with different direction "
                                "[device address=0x%016llx] [size=%llu bytes] "
                                "[mapped with %s] [synced with %s]\n",
                                (unsigned long long)addr, entry->size,
                                dir2name[entry->direction],
                                dir2name[direction]);
        }

        if (entry->direction == DMA_BIDIRECTIONAL)
                goto out;

        if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
                      !(direction == DMA_TO_DEVICE))
                err_printk(dev, entry, "DMA-API: device driver syncs "
                                "device read-only DMA memory for cpu "
                                "[device address=0x%016llx] [size=%llu bytes] "
                                "[mapped with %s] [synced with %s]\n",
                                (unsigned long long)addr, entry->size,
                                dir2name[entry->direction],
                                dir2name[direction]);

        if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
                       !(direction == DMA_FROM_DEVICE))
                err_printk(dev, entry, "DMA-API: device driver syncs "
                                "device write-only DMA memory to device "
                                "[device address=0x%016llx] [size=%llu bytes] "
                                "[mapped with %s] [synced with %s]\n",
                                (unsigned long long)addr, entry->size,
                                dir2name[entry->direction],
                                dir2name[direction]);

out:
        put_hash_bucket(bucket, &flags);
}

void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
                        size_t size, int direction, dma_addr_t dma_addr,
                        bool map_single)
{
        struct dma_debug_entry *entry;

        if (unlikely(global_disable))
                return;

        if (unlikely(dma_mapping_error(dev, dma_addr)))
                return;

        entry = dma_entry_alloc();
        if (!entry)
                return;

        entry->dev       = dev;
        entry->type      = dma_debug_page;
        entry->paddr     = page_to_phys(page) + offset;
        entry->dev_addr  = dma_addr;
        entry->size      = size;
        entry->direction = direction;

        if (map_single)
                entry->type = dma_debug_single;

        if (!PageHighMem(page)) {
                void *addr = ((char *)page_address(page)) + offset;
                check_for_stack(dev, addr);
                check_for_illegal_area(dev, addr, size);
        }

        add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);

void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
                          size_t size, int direction, bool map_single)
{
        struct dma_debug_entry ref = {
                .type           = dma_debug_page,
                .dev            = dev,
                .dev_addr       = addr,
                .size           = size,
                .direction      = direction,
        };

        if (unlikely(global_disable))
                return;

        if (map_single)
                ref.type = dma_debug_single;

        check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);

void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
                      int nents, int mapped_ents, int direction)
{
        struct dma_debug_entry *entry;
        struct scatterlist *s;
        int i;

        if (unlikely(global_disable))
                return;

        for_each_sg(sg, s, mapped_ents, i) {
                entry = dma_entry_alloc();
                if (!entry)
                        return;

                entry->type           = dma_debug_sg;
                entry->dev            = dev;
                entry->paddr          = sg_phys(s);
                entry->size           = s->length;
                entry->dev_addr       = s->dma_address;
                entry->direction      = direction;
                entry->sg_call_ents   = nents;
                entry->sg_mapped_ents = mapped_ents;

                if (!PageHighMem(sg_page(s))) {
                        check_for_stack(dev, sg_virt(s));
                        check_for_illegal_area(dev, sg_virt(s), s->length);
                }

                add_dma_entry(entry);
        }
}
EXPORT_SYMBOL(debug_dma_map_sg);

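/*
 * On unmap the driver passes the original nelems, not the number of entries
 * the mapping actually produced. The first iteration therefore looks up the
 * stored entry to recover sg_mapped_ents, and only that many scatterlist
 * entries are checked and released.
 */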
void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
                        int nelems, int dir)
{
        struct dma_debug_entry *entry;
        struct scatterlist *s;
        int mapped_ents = 0, i;
        unsigned long flags;

        if (unlikely(global_disable))
                return;

        for_each_sg(sglist, s, nelems, i) {

                struct dma_debug_entry ref = {
                        .type           = dma_debug_sg,
                        .dev            = dev,
                        .paddr          = sg_phys(s),
                        .dev_addr       = s->dma_address,
                        .size           = s->length,
                        .direction      = dir,
                        .sg_call_ents   = 0,
                };

                if (mapped_ents && i >= mapped_ents)
                        break;

                if (mapped_ents == 0) {
                        struct hash_bucket *bucket;
                        ref.sg_call_ents = nelems;
                        bucket = get_hash_bucket(&ref, &flags);
                        entry = hash_bucket_find(bucket, &ref);
                        if (entry)
                                mapped_ents = entry->sg_mapped_ents;
                        put_hash_bucket(bucket, &flags);
                }

                check_unmap(&ref);
        }
}
EXPORT_SYMBOL(debug_dma_unmap_sg);

void debug_dma_alloc_coherent(struct device *dev, size_t size,
                              dma_addr_t dma_addr, void *virt)
{
        struct dma_debug_entry *entry;

        if (unlikely(global_disable))
                return;

        if (unlikely(virt == NULL))
                return;

        entry = dma_entry_alloc();
        if (!entry)
                return;

        entry->type      = dma_debug_coherent;
        entry->dev       = dev;
        entry->paddr     = virt_to_phys(virt);
        entry->size      = size;
        entry->dev_addr  = dma_addr;
        entry->direction = DMA_BIDIRECTIONAL;

        add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_alloc_coherent);

void debug_dma_free_coherent(struct device *dev, size_t size,
                             void *virt, dma_addr_t addr)
{
        struct dma_debug_entry ref = {
                .type           = dma_debug_coherent,
                .dev            = dev,
                .paddr          = virt_to_phys(virt),
                .dev_addr       = addr,
                .size           = size,
                .direction      = DMA_BIDIRECTIONAL,
        };

        if (unlikely(global_disable))
                return;

        check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_free_coherent);

void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                                   size_t size, int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, 0, direction, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
                                      dma_addr_t dma_handle, size_t size,
                                      int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, 0, direction, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);

void debug_dma_sync_single_range_for_cpu(struct device *dev,
                                         dma_addr_t dma_handle,
                                         unsigned long offset, size_t size,
                                         int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, offset, direction, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);

void debug_dma_sync_single_range_for_device(struct device *dev,
                                            dma_addr_t dma_handle,
                                            unsigned long offset,
                                            size_t size, int direction)
{
        if (unlikely(global_disable))
                return;

        check_sync(dev, dma_handle, size, offset, direction, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);

void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                               int nelems, int direction)
{
        struct scatterlist *s;
        int i;

        if (unlikely(global_disable))
                return;

        for_each_sg(sg, s, nelems, i) {
                check_sync(dev, s->dma_address, s->dma_length, 0,
                           direction, true);
        }
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                                  int nelems, int direction)
{
        struct scatterlist *s;
        int i;

        if (unlikely(global_disable))
                return;

        for_each_sg(sg, s, nelems, i) {
                check_sync(dev, s->dma_address, s->dma_length, 0,
                           direction, false);
        }
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);