cvmx-fpa.h 8.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299
  1. /***********************license start***************
  2. * Author: Cavium Networks
  3. *
  4. * Contact: support@caviumnetworks.com
  5. * This file is part of the OCTEON SDK
  6. *
  7. * Copyright (c) 2003-2008 Cavium Networks
  8. *
  9. * This file is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License, Version 2, as
  11. * published by the Free Software Foundation.
  12. *
  13. * This file is distributed in the hope that it will be useful, but
  14. * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
  15. * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
  16. * NONINFRINGEMENT. See the GNU General Public License for more
  17. * details.
  18. *
  19. * You should have received a copy of the GNU General Public License
  20. * along with this file; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  22. * or visit http://www.gnu.org/licenses/.
  23. *
  24. * This file may also be available under a different license from Cavium.
  25. * Contact Cavium Networks for more information
  26. ***********************license end**************************************/
  27. /**
  28. * @file
  29. *
  30. * Interface to the hardware Free Pool Allocator.
  31. *
  32. *
  33. */
  34. #ifndef __CVMX_FPA_H__
  35. #define __CVMX_FPA_H__
  36. #include "cvmx-address.h"
  37. #include "cvmx-fpa-defs.h"
  38. #define CVMX_FPA_NUM_POOLS 8
  39. #define CVMX_FPA_MIN_BLOCK_SIZE 128
  40. #define CVMX_FPA_ALIGNMENT 128
/**
 * Structure describing the data format used for stores to the FPA.
 *
 * Overlays a single 64-bit IOBDMA command word; the bit-field widths
 * sum to exactly 64 bits (8+8+8+40), so writing all four fields fully
 * initializes u64.
 *
 * NOTE(review): bit-field layout order is compiler/ABI dependent —
 * presumably this matches the Octeon MIPS ABI's mapping onto the
 * hardware command format; confirm before reusing on another target.
 */
typedef union {
	/* Raw 64-bit command word sent on the I/O bus */
	uint64_t u64;
	struct {
		/*
		 * the (64-bit word) location in scratchpad to write
		 * to (if len != 0)
		 */
		uint64_t scraddr:8;
		/* the number of words in the response (0 => no response) */
		uint64_t len:8;
		/* the ID of the device on the non-coherent bus */
		uint64_t did:8;
		/*
		 * the address that will appear in the first tick on
		 * the NCB bus.
		 */
		uint64_t addr:40;
	} s;
} cvmx_fpa_iobdma_data_t;
/**
 * Structure describing the current state of a FPA pool.
 *
 * One entry exists per hardware pool (see cvmx_fpa_pool_info[]).
 * The size/starting_element_count pair bounds the pool's memory
 * region, as used by cvmx_fpa_is_member().
 */
typedef struct {
	/* Name it was created under (pointer only — string is not copied) */
	const char *name;
	/* Size in bytes of each block */
	uint64_t size;
	/* The base memory address of whole block */
	void *base;
	/* The number of elements in the pool at creation */
	uint64_t starting_element_count;
} cvmx_fpa_pool_info_t;
  76. /**
  77. * Current state of all the pools. Use access functions
  78. * instead of using it directly.
  79. */
  80. extern cvmx_fpa_pool_info_t cvmx_fpa_pool_info[CVMX_FPA_NUM_POOLS];
  81. /* CSR typedefs have been moved to cvmx-csr-*.h */
  82. /**
  83. * Return the name of the pool
  84. *
  85. * @pool: Pool to get the name of
  86. * Returns The name
  87. */
  88. static inline const char *cvmx_fpa_get_name(uint64_t pool)
  89. {
  90. return cvmx_fpa_pool_info[pool].name;
  91. }
  92. /**
  93. * Return the base of the pool
  94. *
  95. * @pool: Pool to get the base of
  96. * Returns The base
  97. */
  98. static inline void *cvmx_fpa_get_base(uint64_t pool)
  99. {
  100. return cvmx_fpa_pool_info[pool].base;
  101. }
  102. /**
  103. * Check if a pointer belongs to an FPA pool. Return non-zero
  104. * if the supplied pointer is inside the memory controlled by
  105. * an FPA pool.
  106. *
  107. * @pool: Pool to check
  108. * @ptr: Pointer to check
  109. * Returns Non-zero if pointer is in the pool. Zero if not
  110. */
  111. static inline int cvmx_fpa_is_member(uint64_t pool, void *ptr)
  112. {
  113. return ((ptr >= cvmx_fpa_pool_info[pool].base) &&
  114. ((char *)ptr <
  115. ((char *)(cvmx_fpa_pool_info[pool].base)) +
  116. cvmx_fpa_pool_info[pool].size *
  117. cvmx_fpa_pool_info[pool].starting_element_count));
  118. }
/**
 * Enable the FPA for use. Must be performed after any CSR
 * configuration but before any other FPA functions.
 *
 * Sequence: warn if already enabled, adjust free-page-FIFO marks on
 * pass1 silicon, then set the enable bit. The CSR accesses are
 * order-critical; do not reorder them.
 */
static inline void cvmx_fpa_enable(void)
{
	union cvmx_fpa_ctl_status status;
	status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
	/* Enabling twice is harmless but indicates a caller bug — warn. */
	if (status.s.enb) {
		cvmx_dprintf
		    ("Warning: Enabling FPA when FPA already enabled.\n");
	}
	/*
	 * Do runtime check as we allow pass1 compiled code to run on
	 * pass2 chips.
	 */
	if (cvmx_octeon_is_pass1()) {
		union cvmx_fpa_fpfx_marks marks;
		int i;
		/*
		 * Set the write mark (fpf_wr = 0xe0) in each of the
		 * FPF1..FPF7 mark registers, which are spaced 8 bytes
		 * apart starting at CVMX_FPA_FPF1_MARKS.
		 */
		for (i = 1; i < 8; i++) {
			marks.u64 =
			    cvmx_read_csr(CVMX_FPA_FPF1_MARKS + (i - 1) * 8ull);
			marks.s.fpf_wr = 0xe0;
			cvmx_write_csr(CVMX_FPA_FPF1_MARKS + (i - 1) * 8ull,
				       marks.u64);
		}
		/* Enforce a 10 cycle delay between config and enable */
		cvmx_wait(10);
	}
	/* FIXME: CVMX_FPA_CTL_STATUS read is unmodelled */
	/*
	 * NOTE(review): the value read above is discarded here and the
	 * register is written with only the enable bit set (see FIXME).
	 */
	status.u64 = 0;
	status.s.enb = 1;
	cvmx_write_csr(CVMX_FPA_CTL_STATUS, status.u64);
}
  153. /**
  154. * Get a new block from the FPA
  155. *
  156. * @pool: Pool to get the block from
  157. * Returns Pointer to the block or NULL on failure
  158. */
  159. static inline void *cvmx_fpa_alloc(uint64_t pool)
  160. {
  161. uint64_t address =
  162. cvmx_read_csr(CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool)));
  163. if (address)
  164. return cvmx_phys_to_ptr(address);
  165. else
  166. return NULL;
  167. }
  168. /**
  169. * Asynchronously get a new block from the FPA
  170. *
  171. * @scr_addr: Local scratch address to put response in. This is a byte address,
  172. * but must be 8 byte aligned.
  173. * @pool: Pool to get the block from
  174. */
  175. static inline void cvmx_fpa_async_alloc(uint64_t scr_addr, uint64_t pool)
  176. {
  177. cvmx_fpa_iobdma_data_t data;
  178. /*
  179. * Hardware only uses 64 bit aligned locations, so convert
  180. * from byte address to 64-bit index
  181. */
  182. data.s.scraddr = scr_addr >> 3;
  183. data.s.len = 1;
  184. data.s.did = CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool);
  185. data.s.addr = 0;
  186. cvmx_send_single(data.u64);
  187. }
/**
 * Free a block allocated with a FPA pool. Does NOT provide memory
 * ordering in cases where the memory block was modified by the core.
 *
 * Use cvmx_fpa_free() instead if the core wrote to the buffer; this
 * variant only has a compiler barrier, not a hardware sync.
 *
 * @ptr: Block to free
 * @pool: Pool to put it in
 * @num_cache_lines:
 *	Cache lines to invalidate
 */
static inline void cvmx_fpa_free_nosync(void *ptr, uint64_t pool,
					uint64_t num_cache_lines)
{
	cvmx_addr_t newptr;
	newptr.u64 = cvmx_ptr_to_phys(ptr);
	/* Encode the pool's FPA device ID into the I/O address. */
	newptr.sfilldidspace.didspace =
	    CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
	/* Prevent GCC from reordering around free */
	barrier();
	/* value written is number of cache lines not written back */
	cvmx_write_io(newptr.u64, num_cache_lines);
}
/**
 * Free a block allocated with a FPA pool. Provides required memory
 * ordering in cases where memory block was modified by core.
 *
 * @ptr: Block to free
 * @pool: Pool to put it in
 * @num_cache_lines:
 *	Cache lines to invalidate
 */
static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
				 uint64_t num_cache_lines)
{
	cvmx_addr_t newptr;
	newptr.u64 = cvmx_ptr_to_phys(ptr);
	/* Encode the pool's FPA device ID into the I/O address. */
	newptr.sfilldidspace.didspace =
	    CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
	/*
	 * Make sure that any previous writes to memory go out before
	 * we free this buffer. This also serves as a barrier to
	 * prevent GCC from reordering operations to after the
	 * free.
	 */
	CVMX_SYNCWS;
	/* value written is number of cache lines not written back */
	cvmx_write_io(newptr.u64, num_cache_lines);
}
  235. /**
  236. * Setup a FPA pool to control a new block of memory.
  237. * This can only be called once per pool. Make sure proper
  238. * locking enforces this.
  239. *
  240. * @pool: Pool to initialize
  241. * 0 <= pool < 8
  242. * @name: Constant character string to name this pool.
  243. * String is not copied.
  244. * @buffer: Pointer to the block of memory to use. This must be
  245. * accessible by all processors and external hardware.
  246. * @block_size: Size for each block controlled by the FPA
  247. * @num_blocks: Number of blocks
  248. *
  249. * Returns 0 on Success,
  250. * -1 on failure
  251. */
  252. extern int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
  253. uint64_t block_size, uint64_t num_blocks);
  254. /**
  255. * Shutdown a Memory pool and validate that it had all of
  256. * the buffers originally placed in it. This should only be
  257. * called by one processor after all hardware has finished
  258. * using the pool.
  259. *
  260. * @pool: Pool to shutdown
  261. * Returns Zero on success
  262. * - Positive is count of missing buffers
  263. * - Negative is too many buffers or corrupted pointers
  264. */
  265. extern uint64_t cvmx_fpa_shutdown_pool(uint64_t pool);
  266. /**
  267. * Get the size of blocks controlled by the pool
  268. * This is resolved to a constant at compile time.
  269. *
  270. * @pool: Pool to access
  271. * Returns Size of the block in bytes
  272. */
  273. uint64_t cvmx_fpa_get_block_size(uint64_t pool);
#endif /* __CVMX_FPA_H__ */