xencomm.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 * Copyright (C) IBM Corp. 2006
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/page.h>
#include <xen/xencomm.h>
#include <xen/interface/xen.h>
#include <asm/xen/xencomm.h>	/* for xencomm_is_phys_contiguous() */
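
/*
 * Record the physical address of every page backing @buffer into @desc.
 * Partial first/last pages are handled by storing the page's address and
 * advancing by the in-page chunk size.  Returns -EINVAL if a virtual
 * address cannot be translated, -ENOSPC if the descriptor has too few
 * address slots for the buffer.
 */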
static int xencomm_init(struct xencomm_desc *desc,
			void *buffer, unsigned long bytes)
{
	unsigned long recorded = 0;
	int i = 0;

	while ((recorded < bytes) && (i < desc->nr_addrs)) {
		unsigned long vaddr = (unsigned long)buffer + recorded;
		unsigned long paddr;
		int offset;
		int chunksz;

		offset = vaddr % PAGE_SIZE; /* handle partial pages */
		chunksz = min(PAGE_SIZE - offset, bytes - recorded);

		paddr = xencomm_vtop(vaddr);
		if (paddr == ~0UL) {
			printk(KERN_DEBUG "%s: couldn't translate vaddr %lx\n",
			       __func__, vaddr);
			return -EINVAL;
		}

		desc->address[i++] = paddr;
		recorded += chunksz;
	}

	if (recorded < bytes) {
		printk(KERN_DEBUG
		       "%s: could only translate %ld of %ld bytes\n",
		       __func__, recorded, bytes);
		return -ENOSPC;
	}

	/* mark remaining addresses invalid (just for safety) */
	while (i < desc->nr_addrs)
		desc->address[i++] = XENCOMM_INVALID;

	desc->magic = XENCOMM_MAGIC;

	return 0;
}
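
/*
 * Allocate a descriptor with one address slot per page spanned by @buffer.
 * When the descriptor header is larger than a pointer, the slab allocator's
 * alignment guarantee is not enough to keep it from straddling a page
 * boundary, so whole pages are taken with __get_free_pages() and nr_addrs
 * is sized to fill them; otherwise a plain kmalloc() is used.
 */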
static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask,
					  void *buffer, unsigned long bytes)
{
	struct xencomm_desc *desc;
	unsigned long buffer_ulong = (unsigned long)buffer;
	unsigned long start = buffer_ulong & PAGE_MASK;
	unsigned long end = (buffer_ulong + bytes) | ~PAGE_MASK;
	unsigned long nr_addrs = (end - start + 1) >> PAGE_SHIFT;
	unsigned long size = sizeof(*desc) +
		sizeof(desc->address[0]) * nr_addrs;

	/*
	 * slab allocator returns at least sizeof(void*) aligned pointer.
	 * When sizeof(*desc) > sizeof(void*), struct xencomm_desc might
	 * cross page boundary.
	 */
	if (sizeof(*desc) > sizeof(void *)) {
		unsigned long order = get_order(size);

		desc = (struct xencomm_desc *)__get_free_pages(gfp_mask,
							       order);
		if (desc == NULL)
			return NULL;

		desc->nr_addrs =
			((PAGE_SIZE << order) - sizeof(struct xencomm_desc)) /
			sizeof(*desc->address);
	} else {
		desc = kmalloc(size, gfp_mask);
		if (desc == NULL)
			return NULL;

		desc->nr_addrs = nr_addrs;
	}
	return desc;
}
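
/*
 * Free a descriptor that was handed out as a handle (a physical address).
 * NULL and inline handles (XENCOMM_INLINE_FLAG set) were never allocated
 * and are ignored; otherwise __va() recovers the kernel virtual address
 * of the original allocation before freeing it.
 */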
void xencomm_free(struct xencomm_handle *desc)
{
	if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG)) {
		struct xencomm_desc *desc__ = (struct xencomm_desc *)desc;

		if (sizeof(*desc__) > sizeof(void *)) {
			unsigned long size = sizeof(*desc__) +
				sizeof(desc__->address[0]) * desc__->nr_addrs;
			unsigned long order = get_order(size);

			free_pages((unsigned long)__va(desc), order);
		} else
			kfree(__va(desc));
	}
}
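
/*
 * Allocate and initialise a descriptor for @buffer.  A zero-length buffer
 * yields a NULL descriptor, which Xen recognizes.  On failure the freshly
 * allocated descriptor is released again via its physical address.
 */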
static int xencomm_create(void *buffer, unsigned long bytes,
			  struct xencomm_desc **ret, gfp_t gfp_mask)
{
	struct xencomm_desc *desc;
	int rc;

	pr_debug("%s: %p[%ld]\n", __func__, buffer, bytes);

	if (bytes == 0) {
		/* don't create a descriptor; Xen recognizes NULL. */
		BUG_ON(buffer != NULL);
		*ret = NULL;
		return 0;
	}

	BUG_ON(buffer == NULL); /* 'bytes' is non-zero */

	desc = xencomm_alloc(gfp_mask, buffer, bytes);
	if (!desc) {
		printk(KERN_DEBUG "%s failure\n", "xencomm_alloc");
		return -ENOMEM;
	}

	rc = xencomm_init(desc, buffer, bytes);
	if (rc) {
		printk(KERN_DEBUG "%s failure: %d\n", "xencomm_init", rc);
		xencomm_free((struct xencomm_handle *)__pa(desc));
		return rc;
	}

	*ret = desc;
	return 0;
}
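
/*
 * Physically contiguous buffers need no descriptor at all: the handle is
 * simply the buffer's physical address with XENCOMM_INLINE_FLAG set.
 */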
static struct xencomm_handle *xencomm_create_inline(void *ptr)
{
	unsigned long paddr;

	BUG_ON(!xencomm_is_phys_contiguous((unsigned long)ptr));

	paddr = (unsigned long)xencomm_pa(ptr);
	BUG_ON(paddr & XENCOMM_INLINE_FLAG);
	return (struct xencomm_handle *)(paddr | XENCOMM_INLINE_FLAG);
}

/* "mini" routine, for stack-based communications: */
static int xencomm_create_mini(void *buffer,
	unsigned long bytes, struct xencomm_mini *xc_desc,
	struct xencomm_desc **ret)
{
	int rc = 0;
	struct xencomm_desc *desc;

	BUG_ON(((unsigned long)xc_desc) % sizeof(*xc_desc) != 0);

	desc = (void *)xc_desc;
	desc->nr_addrs = XENCOMM_MINI_ADDRS;

	rc = xencomm_init(desc, buffer, bytes);
	if (!rc)
		*ret = desc;

	return rc;
}
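
/*
 * Map an arbitrary kernel buffer into a xencomm handle that can be handed
 * to Xen: inline for physically contiguous buffers, otherwise backed by a
 * freshly allocated descriptor (GFP_KERNEL, so this may sleep).  A caller
 * would typically do something like the following (some_hypercall() is a
 * hypothetical wrapper, shown only for illustration):
 *
 *	handle = xencomm_map(buf, len);
 *	ret = some_hypercall(handle);
 *	xencomm_free(handle);
 */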
struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes)
{
	int rc;
	struct xencomm_desc *desc;

	if (xencomm_is_phys_contiguous((unsigned long)ptr))
		return xencomm_create_inline(ptr);

	rc = xencomm_create(ptr, bytes, &desc, GFP_KERNEL);

	if (rc || desc == NULL)
		return NULL;

	return xencomm_pa(desc);
}
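
/*
 * As xencomm_map(), but without allocating memory: the caller supplies a
 * struct xencomm_mini (per the comment above, typically on its stack) to
 * hold the descriptor, which is useful where allocation is undesirable.
 */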
struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes,
			struct xencomm_mini *xc_desc)
{
	int rc;
	struct xencomm_desc *desc = NULL;

	if (xencomm_is_phys_contiguous((unsigned long)ptr))
		return xencomm_create_inline(ptr);

	rc = xencomm_create_mini(ptr, bytes, xc_desc,
				&desc);
	if (rc)
		return NULL;

	return xencomm_pa(desc);
}