dma-debug.c 4.8 KB

/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
  19. #include <linux/dma-debug.h>
  20. #include <linux/spinlock.h>
  21. #include <linux/types.h>
  22. #include <linux/list.h>
  23. #define HASH_SIZE 1024ULL
  24. #define HASH_FN_SHIFT 13
  25. #define HASH_FN_MASK (HASH_SIZE - 1)
  26. enum {
  27. dma_debug_single,
  28. dma_debug_page,
  29. dma_debug_sg,
  30. dma_debug_coherent,
  31. };
  32. struct dma_debug_entry {
  33. struct list_head list;
  34. struct device *dev;
  35. int type;
  36. phys_addr_t paddr;
  37. u64 dev_addr;
  38. u64 size;
  39. int direction;
  40. int sg_call_ents;
  41. int sg_mapped_ents;
  42. };
  43. struct hash_bucket {
  44. struct list_head list;
  45. spinlock_t lock;
  46. } __cacheline_aligned_in_smp;
  47. /* Hash list to save the allocated dma addresses */
  48. static struct hash_bucket dma_entry_hash[HASH_SIZE];
  49. /* List of pre-allocated dma_debug_entry's */
  50. static LIST_HEAD(free_entries);
  51. /* Lock for the list above */
  52. static DEFINE_SPINLOCK(free_entries_lock);
  53. /* Global disable flag - will be set in case of an error */
  54. static bool global_disable __read_mostly;
  55. static u32 num_free_entries;
  56. static u32 min_free_entries;
  57. /*
  58. * Hash related functions
  59. *
  60. * Every DMA-API request is saved into a struct dma_debug_entry. To
  61. * have quick access to these structs they are stored into a hash.
  62. */
  63. static int hash_fn(struct dma_debug_entry *entry)
  64. {
  65. /*
  66. * Hash function is based on the dma address.
  67. * We use bits 20-27 here as the index into the hash
  68. */
  69. return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
  70. }
  71. /*
  72. * Request exclusive access to a hash bucket for a given dma_debug_entry.
  73. */
  74. static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
  75. unsigned long *flags)
  76. {
  77. int idx = hash_fn(entry);
  78. unsigned long __flags;
  79. spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
  80. *flags = __flags;
  81. return &dma_entry_hash[idx];
  82. }
  83. /*
  84. * Give up exclusive access to the hash bucket
  85. */
  86. static void put_hash_bucket(struct hash_bucket *bucket,
  87. unsigned long *flags)
  88. {
  89. unsigned long __flags = *flags;
  90. spin_unlock_irqrestore(&bucket->lock, __flags);
  91. }
  92. /*
  93. * Search a given entry in the hash bucket list
  94. */
  95. static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
  96. struct dma_debug_entry *ref)
  97. {
  98. struct dma_debug_entry *entry;
  99. list_for_each_entry(entry, &bucket->list, list) {
  100. if ((entry->dev_addr == ref->dev_addr) &&
  101. (entry->dev == ref->dev))
  102. return entry;
  103. }
  104. return NULL;
  105. }
  106. /*
  107. * Add an entry to a hash bucket
  108. */
  109. static void hash_bucket_add(struct hash_bucket *bucket,
  110. struct dma_debug_entry *entry)
  111. {
  112. list_add_tail(&entry->list, &bucket->list);
  113. }
  114. /*
  115. * Remove entry from a hash bucket list
  116. */
  117. static void hash_bucket_del(struct dma_debug_entry *entry)
  118. {
  119. list_del(&entry->list);
  120. }
  121. /*
  122. * Wrapper function for adding an entry to the hash.
  123. * This function takes care of locking itself.
  124. */
  125. static void add_dma_entry(struct dma_debug_entry *entry)
  126. {
  127. struct hash_bucket *bucket;
  128. unsigned long flags;
  129. bucket = get_hash_bucket(entry, &flags);
  130. hash_bucket_add(bucket, entry);
  131. put_hash_bucket(bucket, &flags);
  132. }
  133. /* struct dma_entry allocator
  134. *
  135. * The next two functions implement the allocator for
  136. * struct dma_debug_entries.
  137. */
  138. static struct dma_debug_entry *dma_entry_alloc(void)
  139. {
  140. struct dma_debug_entry *entry = NULL;
  141. unsigned long flags;
  142. spin_lock_irqsave(&free_entries_lock, flags);
  143. if (list_empty(&free_entries)) {
  144. printk(KERN_ERR "DMA-API: debugging out of memory "
  145. "- disabling\n");
  146. global_disable = true;
  147. goto out;
  148. }
  149. entry = list_entry(free_entries.next, struct dma_debug_entry, list);
  150. list_del(&entry->list);
  151. memset(entry, 0, sizeof(*entry));
  152. num_free_entries -= 1;
  153. if (num_free_entries < min_free_entries)
  154. min_free_entries = num_free_entries;
  155. out:
  156. spin_unlock_irqrestore(&free_entries_lock, flags);
  157. return entry;
  158. }
  159. static void dma_entry_free(struct dma_debug_entry *entry)
  160. {
  161. unsigned long flags;
  162. /*
  163. * add to beginning of the list - this way the entries are
  164. * more likely cache hot when they are reallocated.
  165. */
  166. spin_lock_irqsave(&free_entries_lock, flags);
  167. list_add(&entry->list, &free_entries);
  168. num_free_entries += 1;
  169. spin_unlock_irqrestore(&free_entries_lock, flags);
  170. }