/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2008 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/
/*
 * Implementation of the Level 2 Cache (L2C) control, measurement, and
 * debugging facilities.
 */

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-l2c.h>
#include <asm/octeon/cvmx-spinlock.h>

/*
 * This spinlock is used internally to ensure that only one core is
 * performing certain L2 operations at a time.
 *
 * NOTE: This only protects calls from within a single application -
 * if multiple applications or operating systems are running, then it
 * is up to the user program to coordinate between them.
 */
static cvmx_spinlock_t cvmx_l2c_spinlock;
static inline int l2_size_half(void)
{
	uint64_t val = cvmx_read_csr(CVMX_L2D_FUS3);
	return !!(val & (1ull << 34));
}
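
/**
 * Return the L2 cache way partitioning (UMSK) mask for a core.  A set
 * bit in the returned mask blocks the core from using the
 * corresponding L2 way.
 *
 * @core: The core to query
 *
 * Returns the UMSK[] field for the core, or -1 if the core number is
 * invalid.
 */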
int cvmx_l2c_get_core_way_partition(uint32_t core)
{
	uint32_t field;

	/* Validate the core number */
	if (core >= cvmx_octeon_num_cores())
		return -1;

	/*
	 * Use the lower two bits of the core number to determine the
	 * bit offset of the UMSK[] field in the L2C_SPAR register.
	 */
	field = (core & 0x3) * 8;

	/*
	 * Return the UMSK[] field from the appropriate L2C_SPAR
	 * register based on the core number.
	 */
	switch (core & 0xC) {
	case 0x0:
		return (cvmx_read_csr(CVMX_L2C_SPAR0) & (0xFF << field)) >> field;
	case 0x4:
		return (cvmx_read_csr(CVMX_L2C_SPAR1) & (0xFF << field)) >> field;
	case 0x8:
		return (cvmx_read_csr(CVMX_L2C_SPAR2) & (0xFF << field)) >> field;
	case 0xC:
		return (cvmx_read_csr(CVMX_L2C_SPAR3) & (0xFF << field)) >> field;
	}
	return 0;
}
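
/**
 * Partition the L2 cache for a core: set the UMSK[] field that
 * controls which L2 ways the core may use.
 *
 * @core: The core the partitioning applies to
 * @mask: The partitioning of the ways; each set bit blocks the
 *        corresponding way.  A mask that, together with the current
 *        setting, would block all ways is rejected.
 *
 * Returns 0 on success, -1 on error.
 */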
int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask)
{
	uint32_t field;
	uint32_t valid_mask;

	valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
	mask &= valid_mask;

	/* A UMSK setting which blocks all L2C ways is an error. */
	if (mask == valid_mask)
		return -1;

	/* Validate the core number */
	if (core >= cvmx_octeon_num_cores())
		return -1;

	/* Check to make sure current mask & new mask don't block all ways */
	if (((mask | cvmx_l2c_get_core_way_partition(core)) & valid_mask) == valid_mask)
		return -1;

	/*
	 * Use the lower two bits of core to determine the bit offset
	 * of the UMSK[] field in the L2C_SPAR register.
	 */
	field = (core & 0x3) * 8;

	/*
	 * Assign the new mask setting to the UMSK[] field in the
	 * appropriate L2C_SPAR register based on the core number.
	 */
	switch (core & 0xC) {
	case 0x0:
		cvmx_write_csr(CVMX_L2C_SPAR0,
			       (cvmx_read_csr(CVMX_L2C_SPAR0) &
				~(0xFF << field)) | mask << field);
		break;
	case 0x4:
		cvmx_write_csr(CVMX_L2C_SPAR1,
			       (cvmx_read_csr(CVMX_L2C_SPAR1) &
				~(0xFF << field)) | mask << field);
		break;
	case 0x8:
		cvmx_write_csr(CVMX_L2C_SPAR2,
			       (cvmx_read_csr(CVMX_L2C_SPAR2) &
				~(0xFF << field)) | mask << field);
		break;
	case 0xC:
		cvmx_write_csr(CVMX_L2C_SPAR3,
			       (cvmx_read_csr(CVMX_L2C_SPAR3) &
				~(0xFF << field)) | mask << field);
		break;
	}
	return 0;
}
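
/**
 * Partition the L2 cache for hardware-initiated (non-core) accesses
 * via L2C_SPAR4, using the same blocking-mask convention as the
 * per-core partitioning above.
 *
 * @mask: The partitioning of the ways expressed as a blocking mask
 *
 * Returns 0 on success, -1 on error.
 */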
int cvmx_l2c_set_hw_way_partition(uint32_t mask)
{
	uint32_t valid_mask;

	valid_mask = 0xff;
	if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN38XX)) {
		if (l2_size_half())
			valid_mask = 0xf;
	} else if (l2_size_half())
		valid_mask = 0x3;
	mask &= valid_mask;

	/* A UMSK setting which blocks all L2C ways is an error. */
	if (mask == valid_mask)
		return -1;
	/* Check to make sure current mask & new mask don't block all ways */
	if (((mask | cvmx_l2c_get_hw_way_partition()) & valid_mask) == valid_mask)
		return -1;

	cvmx_write_csr(CVMX_L2C_SPAR4,
		       (cvmx_read_csr(CVMX_L2C_SPAR4) & ~0xFF) | mask);
	return 0;
}
int cvmx_l2c_get_hw_way_partition(void)
{
	return cvmx_read_csr(CVMX_L2C_SPAR4) & 0xFF;
}
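
/**
 * Configure one of the four L2 cache performance counters.
 *
 * @counter:       The counter to configure (0-3; values above 3 fall
 *                 through to counter 3)
 * @event:         The type of L2 event to count
 * @clear_on_read: When set, the counter resets after it is read.
 *                 This feature is not available on pass 1 silicon,
 *                 so the flag is ignored there.
 */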
void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event,
			  uint32_t clear_on_read)
{
	union cvmx_l2c_pfctl pfctl;

	pfctl.u64 = cvmx_read_csr(CVMX_L2C_PFCTL);
	switch (counter) {
	case 0:
		pfctl.s.cnt0sel = event;
		pfctl.s.cnt0ena = 1;
		if (!cvmx_octeon_is_pass1())
			pfctl.s.cnt0rdclr = clear_on_read;
		break;
	case 1:
		pfctl.s.cnt1sel = event;
		pfctl.s.cnt1ena = 1;
		if (!cvmx_octeon_is_pass1())
			pfctl.s.cnt1rdclr = clear_on_read;
		break;
	case 2:
		pfctl.s.cnt2sel = event;
		pfctl.s.cnt2ena = 1;
		if (!cvmx_octeon_is_pass1())
			pfctl.s.cnt2rdclr = clear_on_read;
		break;
	case 3:
	default:
		pfctl.s.cnt3sel = event;
		pfctl.s.cnt3ena = 1;
		if (!cvmx_octeon_is_pass1())
			pfctl.s.cnt3rdclr = clear_on_read;
		break;
	}
	cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64);
}
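
/**
 * Read the current value of one of the L2 cache performance counters.
 *
 * @counter: The counter to read (0-3; values above 3 read counter 3)
 *
 * Returns the current counter value.
 */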
uint64_t cvmx_l2c_read_perf(uint32_t counter)
{
	switch (counter) {
	case 0:
		return cvmx_read_csr(CVMX_L2C_PFC0);
	case 1:
		return cvmx_read_csr(CVMX_L2C_PFC1);
	case 2:
		return cvmx_read_csr(CVMX_L2C_PFC2);
	case 3:
	default:
		return cvmx_read_csr(CVMX_L2C_PFC3);
	}
}
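
/*
 * Illustrative usage sketch (not part of the original sources):
 * program counter 0, run a workload, then read the count back.
 * CVMX_L2C_EVENT_OF_INTEREST and run_workload() are placeholders;
 * the real event enumerators are the enum cvmx_l2c_event values
 * declared in cvmx-l2c.h.
 *
 *	uint64_t count;
 *
 *	cvmx_l2c_config_perf(0, CVMX_L2C_EVENT_OF_INTEREST, 0);
 *	run_workload();
 *	count = cvmx_l2c_read_perf(0);
 */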
/**
 * @INTERNAL
 * Helper function used to fault in cache lines for L2 cache locking
 *
 * @addr: Address of base of memory region to read into L2 cache
 * @len:  Length (in bytes) of region to fault in
 */
static void fault_in(uint64_t addr, int len)
{
	volatile char *ptr;
	volatile char dummy = 0;

	/*
	 * Adjust addr and length so we get all cache lines even for
	 * small ranges spanning two cache lines.
	 */
	len += addr & CVMX_CACHE_LINE_MASK;
	addr &= ~CVMX_CACHE_LINE_MASK;
	ptr = (volatile char *)cvmx_phys_to_ptr(addr);
	/*
	 * Invalidate L1 cache to make sure all loads result in data
	 * being in L2.
	 */
	CVMX_DCACHE_INVALIDATE;
	while (len > 0) {
		dummy += *ptr;
		len -= CVMX_CACHE_LINE_SIZE;
		ptr += CVMX_CACHE_LINE_SIZE;
	}
}
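
/**
 * Lock the cache line containing the given physical address into the
 * L2 cache, using the L2C lock-base/lock-offset mechanism.
 *
 * @addr: Physical address of the line to lock (rounded down to a
 *        cache line boundary)
 *
 * Returns 0 on success, 1 if the hardware reported a lock error
 * (L2T_ERR lckerr/lckerr2) and the line could not be locked.
 */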
int cvmx_l2c_lock_line(uint64_t addr)
{
	int retval = 0;
	union cvmx_l2c_dbg l2cdbg;
	union cvmx_l2c_lckbase lckbase;
	union cvmx_l2c_lckoff lckoff;
	union cvmx_l2t_err l2t_err;

	l2cdbg.u64 = 0;
	lckbase.u64 = 0;
	lckoff.u64 = 0;

	cvmx_spinlock_lock(&cvmx_l2c_spinlock);

	/* Clear l2t error bits if set */
	l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
	l2t_err.s.lckerr = 1;
	l2t_err.s.lckerr2 = 1;
	cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64);

	addr &= ~CVMX_CACHE_LINE_MASK;

	/* Set this core as debug core */
	l2cdbg.s.ppnum = cvmx_get_core_num();
	CVMX_SYNC;
	cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
	cvmx_read_csr(CVMX_L2C_DBG);

	lckoff.s.lck_offset = 0;	/* Only lock 1 line at a time */
	cvmx_write_csr(CVMX_L2C_LCKOFF, lckoff.u64);
	cvmx_read_csr(CVMX_L2C_LCKOFF);

	if (((union cvmx_l2c_cfg)(cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias) {
		int alias_shift = CVMX_L2C_IDX_ADDR_SHIFT +
				  2 * CVMX_L2_SET_BITS - 1;
		uint64_t addr_tmp = addr ^
				    (addr & ((1 << alias_shift) - 1)) >>
				    CVMX_L2_SET_BITS;
		lckbase.s.lck_base = addr_tmp >> 7;
	} else {
		lckbase.s.lck_base = addr >> 7;
	}

	lckbase.s.lck_ena = 1;
	cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
	cvmx_read_csr(CVMX_L2C_LCKBASE);	/* Make sure it gets there */

	fault_in(addr, CVMX_CACHE_LINE_SIZE);

	lckbase.s.lck_ena = 0;
	cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
	cvmx_read_csr(CVMX_L2C_LCKBASE);	/* Make sure it gets there */

	/* Stop being debug core */
	cvmx_write_csr(CVMX_L2C_DBG, 0);
	cvmx_read_csr(CVMX_L2C_DBG);

	l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
	if (l2t_err.s.lckerr || l2t_err.s.lckerr2)
		retval = 1;	/* We were unable to lock the line */

	cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
	return retval;
}
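
/**
 * Lock a memory region into the L2 cache, one line at a time.
 *
 * @start: Physical start address of the region
 * @len:   Length of the region in bytes; start and len are rounded
 *         out to cache line boundaries
 *
 * Returns the number of lines that could not be locked, so 0
 * indicates complete success.
 *
 * Illustrative usage sketch (not part of the original sources),
 * where buf_phys is assumed to be a valid physical address:
 *
 *	if (cvmx_l2c_lock_mem_region(buf_phys, 4096) != 0)
 *		cvmx_dprintf("Some lines failed to lock\n");
 */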
int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len)
{
	int retval = 0;

	/* Round start/end to cache line boundaries */
	len += start & CVMX_CACHE_LINE_MASK;
	start &= ~CVMX_CACHE_LINE_MASK;
	len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;

	while (len) {
		retval += cvmx_l2c_lock_line(start);
		start += CVMX_CACHE_LINE_SIZE;
		len -= CVMX_CACHE_LINE_SIZE;
	}
	return retval;
}
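
/**
 * Flush (write back and invalidate) the entire L2 cache by issuing a
 * debug-mode FINV store to every set and way.  The internal spinlock
 * serializes callers within this application; see the note at the top
 * of this file.
 */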
void cvmx_l2c_flush(void)
{
	uint64_t assoc, set;
	uint64_t n_assoc, n_set;
	union cvmx_l2c_dbg l2cdbg;

	cvmx_spinlock_lock(&cvmx_l2c_spinlock);

	l2cdbg.u64 = 0;
	if (!OCTEON_IS_MODEL(OCTEON_CN30XX))
		l2cdbg.s.ppnum = cvmx_get_core_num();
	l2cdbg.s.finv = 1;

	n_set = CVMX_L2_SETS;
	n_assoc = l2_size_half() ? (CVMX_L2_ASSOC / 2) : CVMX_L2_ASSOC;

	for (set = 0; set < n_set; set++) {
		for (assoc = 0; assoc < n_assoc; assoc++) {
			l2cdbg.s.set = assoc;
			/*
			 * Enter debug mode, and make sure all other
			 * writes complete before we enter debug mode.
			 */
			CVMX_SYNCW;
			cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
			cvmx_read_csr(CVMX_L2C_DBG);

			CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
							    set * CVMX_CACHE_LINE_SIZE),
					       0);
			CVMX_SYNCW;	/* Push STF out to L2 */

			/* Exit debug mode */
			CVMX_SYNC;
			cvmx_write_csr(CVMX_L2C_DBG, 0);
			cvmx_read_csr(CVMX_L2C_DBG);
		}
	}

	cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
}
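
/**
 * Unlock a single cache line, if the given address is present in the
 * L2.  The matching line is flushed with a debug-mode FINV store,
 * which evicts it and releases its lock.
 *
 * @address: Physical address to unlock
 *
 * Returns the previous lock state of the line (the tag's L bit), or 0
 * if the address was not found in the cache.
 */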
int cvmx_l2c_unlock_line(uint64_t address)
{
	int assoc;
	union cvmx_l2c_tag tag;
	union cvmx_l2c_dbg l2cdbg;
	uint32_t tag_addr;
	uint32_t index = cvmx_l2c_address_to_index(address);

	cvmx_spinlock_lock(&cvmx_l2c_spinlock);
	/* Compute portion of address that is stored in tag */
	tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) &
		    ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
	for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) {
		tag = cvmx_get_l2c_tag(assoc, index);

		if (tag.s.V && (tag.s.addr == tag_addr)) {
			l2cdbg.u64 = 0;
			l2cdbg.s.ppnum = cvmx_get_core_num();
			l2cdbg.s.set = assoc;
			l2cdbg.s.finv = 1;

			CVMX_SYNC;
			/* Enter debug mode */
			cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
			cvmx_read_csr(CVMX_L2C_DBG);

			CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
							    address),
					       0);
			CVMX_SYNC;
			/* Exit debug mode */
			cvmx_write_csr(CVMX_L2C_DBG, 0);
			cvmx_read_csr(CVMX_L2C_DBG);

			cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
			return tag.s.L;
		}
	}
	cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
	return 0;
}
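
/**
 * Unlock a memory region, one cache line at a time.
 *
 * @start: Physical start address of the region
 * @len:   Length of the region in bytes; start and len are rounded
 *         out to cache line boundaries
 *
 * Returns the number of lines that were actually unlocked.
 */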
int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len)
{
	int num_unlocked = 0;

	/* Round start/end to cache line boundaries */
	len += start & CVMX_CACHE_LINE_MASK;
	start &= ~CVMX_CACHE_LINE_MASK;
	len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;

	while (len > 0) {
		num_unlocked += cvmx_l2c_unlock_line(start);
		start += CVMX_CACHE_LINE_SIZE;
		len -= CVMX_CACHE_LINE_SIZE;
	}
	return num_unlocked;
}
/*
 * Internal l2c tag types.  These are converted to a generic structure
 * that can be used on all chips.
 */
union __cvmx_l2c_tag {
	uint64_t u64;
	struct cvmx_l2c_tag_cn50xx {
		uint64_t reserved:40;
		uint64_t V:1;		/* Line valid */
		uint64_t D:1;		/* Line dirty */
		uint64_t L:1;		/* Line locked */
		uint64_t U:1;		/* Use, LRU eviction */
		uint64_t addr:20;	/* Phys mem addr (33..14) */
	} cn50xx;
	struct cvmx_l2c_tag_cn30xx {
		uint64_t reserved:41;
		uint64_t V:1;		/* Line valid */
		uint64_t D:1;		/* Line dirty */
		uint64_t L:1;		/* Line locked */
		uint64_t U:1;		/* Use, LRU eviction */
		uint64_t addr:19;	/* Phys mem addr (33..15) */
	} cn30xx;
	struct cvmx_l2c_tag_cn31xx {
		uint64_t reserved:42;
		uint64_t V:1;		/* Line valid */
		uint64_t D:1;		/* Line dirty */
		uint64_t L:1;		/* Line locked */
		uint64_t U:1;		/* Use, LRU eviction */
		uint64_t addr:18;	/* Phys mem addr (33..16) */
	} cn31xx;
	struct cvmx_l2c_tag_cn38xx {
		uint64_t reserved:43;
		uint64_t V:1;		/* Line valid */
		uint64_t D:1;		/* Line dirty */
		uint64_t L:1;		/* Line locked */
		uint64_t U:1;		/* Use, LRU eviction */
		uint64_t addr:17;	/* Phys mem addr (33..17) */
	} cn38xx;
	struct cvmx_l2c_tag_cn58xx {
		uint64_t reserved:44;
		uint64_t V:1;		/* Line valid */
		uint64_t D:1;		/* Line dirty */
		uint64_t L:1;		/* Line locked */
		uint64_t U:1;		/* Use, LRU eviction */
		uint64_t addr:16;	/* Phys mem addr (33..18) */
	} cn58xx;
	struct cvmx_l2c_tag_cn58xx cn56xx;	/* 2048 sets */
	struct cvmx_l2c_tag_cn31xx cn52xx;	/* 512 sets */
};
/**
 * @INTERNAL
 * Function to read an L2C tag.  This code makes the current core the
 * 'debug core' for the L2, so it must only be executed by one core at
 * a time.
 *
 * @assoc: Association (way) of the tag to dump
 * @index: Index of the cacheline
 *
 * Returns the Octeon model specific tag structure.  This is
 * translated by a wrapper function to a generic form that is easier
 * for applications to use.
 */
static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
{
	uint64_t debug_tag_addr = (((1ULL << 63) | (index << 7)) + 96);
	uint64_t core = cvmx_get_core_num();
	union __cvmx_l2c_tag tag_val;
	uint64_t dbg_addr = CVMX_L2C_DBG;
	unsigned long flags;
	union cvmx_l2c_dbg debug_val;

	debug_val.u64 = 0;
	/*
	 * For low core count parts, the core number is always small
	 * enough to stay in the correct field and not set any
	 * reserved bits.
	 */
	debug_val.s.ppnum = core;
	debug_val.s.l2t = 1;
	debug_val.s.set = assoc;
	/*
	 * Make sure core is quiet (no prefetches, etc.) before
	 * entering debug mode.
	 */
	CVMX_SYNC;
	/* Flush L1 to make sure debug load misses L1 */
	CVMX_DCACHE_INVALIDATE;

	local_irq_save(flags);

	/*
	 * The following must be done in assembly as when in debug
	 * mode all data loads from L2 return special debug data, not
	 * normal memory contents.  Also, interrupts must be disabled,
	 * since if an interrupt occurs while in debug mode the ISR
	 * will get debug data from all its memory reads instead of
	 * the contents of memory.
	 */
	asm volatile (
		".set push\n\t"
		".set mips64\n\t"
		".set noreorder\n\t"
		/* Enter debug mode, wait for store */
		"sd    %[dbg_val], 0(%[dbg_addr])\n\t"
		"ld    $0, 0(%[dbg_addr])\n\t"
		/* Read L2C tag data */
		"ld    %[tag_val], 0(%[tag_addr])\n\t"
		/* Exit debug mode, wait for store */
		"sd    $0, 0(%[dbg_addr])\n\t"
		"ld    $0, 0(%[dbg_addr])\n\t"
		/* Invalidate dcache to discard debug data */
		"cache 9, 0($0)\n\t"
		".set pop"
		: [tag_val] "=r"(tag_val.u64)
		: [dbg_addr] "r"(dbg_addr), [dbg_val] "r"(debug_val.u64),
		  [tag_addr] "r"(debug_tag_addr)
		: "memory");

	local_irq_restore(flags);

	return tag_val;
}
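
/**
 * Read an L2 cache tag and translate it to the generic
 * union cvmx_l2c_tag form.
 *
 * @association: Way of the L2 cache to read (validated against
 *               cvmx_l2c_get_num_assoc())
 * @index:       Set index to read (validated against
 *               cvmx_l2c_get_num_sets())
 *
 * Returns the tag; on a range error an all-zero tag is returned and a
 * diagnostic message is printed.
 */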
union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index)
{
	union __cvmx_l2c_tag tmp_tag;
	union cvmx_l2c_tag tag;

	tag.u64 = 0;

	if ((int)association >= cvmx_l2c_get_num_assoc()) {
		cvmx_dprintf("ERROR: cvmx_l2c_get_tag association out of range\n");
		return tag;
	}
	if ((int)index >= cvmx_l2c_get_num_sets()) {
		cvmx_dprintf("ERROR: cvmx_l2c_get_tag "
			     "index out of range (arg: %d, max: %d)\n",
			     index, cvmx_l2c_get_num_sets());
		return tag;
	}

	/* __read_l2_tag is intended for internal use only */
	tmp_tag = __read_l2_tag(association, index);

	/*
	 * Convert all tag structure types to generic version, as it
	 * can represent all models.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
		tag.s.V = tmp_tag.cn58xx.V;
		tag.s.D = tmp_tag.cn58xx.D;
		tag.s.L = tmp_tag.cn58xx.L;
		tag.s.U = tmp_tag.cn58xx.U;
		tag.s.addr = tmp_tag.cn58xx.addr;
	} else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
		tag.s.V = tmp_tag.cn38xx.V;
		tag.s.D = tmp_tag.cn38xx.D;
		tag.s.L = tmp_tag.cn38xx.L;
		tag.s.U = tmp_tag.cn38xx.U;
		tag.s.addr = tmp_tag.cn38xx.addr;
	} else if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
		   OCTEON_IS_MODEL(OCTEON_CN52XX)) {
		tag.s.V = tmp_tag.cn31xx.V;
		tag.s.D = tmp_tag.cn31xx.D;
		tag.s.L = tmp_tag.cn31xx.L;
		tag.s.U = tmp_tag.cn31xx.U;
		tag.s.addr = tmp_tag.cn31xx.addr;
	} else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
		tag.s.V = tmp_tag.cn30xx.V;
		tag.s.D = tmp_tag.cn30xx.D;
		tag.s.L = tmp_tag.cn30xx.L;
		tag.s.U = tmp_tag.cn30xx.U;
		tag.s.addr = tmp_tag.cn30xx.addr;
	} else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
		tag.s.V = tmp_tag.cn50xx.V;
		tag.s.D = tmp_tag.cn50xx.D;
		tag.s.L = tmp_tag.cn50xx.L;
		tag.s.U = tmp_tag.cn50xx.U;
		tag.s.addr = tmp_tag.cn50xx.addr;
	} else {
		cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
	}
	return tag;
}
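
/**
 * Map a physical address to the L2 set index it would occupy.  When
 * index aliasing is enabled (L2C_CFG[IDXALIAS]), higher address bits
 * are XORed into the index so contiguous allocations spread across
 * sets.
 *
 * @addr: Physical address to map
 *
 * Returns the set index for the address.
 */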
uint32_t cvmx_l2c_address_to_index(uint64_t addr)
{
	uint64_t idx = addr >> CVMX_L2C_IDX_ADDR_SHIFT;
	union cvmx_l2c_cfg l2c_cfg;

	l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
	if (l2c_cfg.s.idxalias)
		idx ^= (addr & CVMX_L2C_ALIAS_MASK) >>
		       CVMX_L2C_TAG_ADDR_ALIAS_SHIFT;
	idx &= CVMX_L2C_IDX_MASK;
	return idx;
}
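
/* Return the total size of the L2 cache in bytes: sets x ways x line size */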
int cvmx_l2c_get_cache_size_bytes(void)
{
	return cvmx_l2c_get_num_sets() * cvmx_l2c_get_num_assoc() *
	       CVMX_CACHE_LINE_SIZE;
}
/* Return log base 2 of the number of sets in the L2 cache */
int cvmx_l2c_get_set_bits(void)
{
	int l2_set_bits;

	if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
		l2_set_bits = 11;	/* 2048 sets */
	else if (OCTEON_IS_MODEL(OCTEON_CN38XX))
		l2_set_bits = 10;	/* 1024 sets */
	else if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
		 OCTEON_IS_MODEL(OCTEON_CN52XX))
		l2_set_bits = 9;	/* 512 sets */
	else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
		l2_set_bits = 8;	/* 256 sets */
	else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
		l2_set_bits = 7;	/* 128 sets */
	else {
		cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
		l2_set_bits = 11;	/* 2048 sets */
	}
	return l2_set_bits;
}
/* Return the number of sets in the L2 Cache */
int cvmx_l2c_get_num_sets(void)
{
	return 1 << cvmx_l2c_get_set_bits();
}
/* Return the number of associations in the L2 Cache */
int cvmx_l2c_get_num_assoc(void)
{
	int l2_assoc;

	if (OCTEON_IS_MODEL(OCTEON_CN56XX) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX) ||
	    OCTEON_IS_MODEL(OCTEON_CN58XX) ||
	    OCTEON_IS_MODEL(OCTEON_CN50XX) ||
	    OCTEON_IS_MODEL(OCTEON_CN38XX))
		l2_assoc = 8;
	else if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
		 OCTEON_IS_MODEL(OCTEON_CN30XX))
		l2_assoc = 4;
	else {
		cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
		l2_assoc = 8;
	}

	/* Check to see if part of the cache is disabled */
	if (cvmx_fuse_read(265))
		l2_assoc = l2_assoc >> 2;
	else if (cvmx_fuse_read(264))
		l2_assoc = l2_assoc >> 1;

	return l2_assoc;
}
/**
 * Flush a line from the L2 cache.
 * This should only be called from one core at a time, as this routine
 * sets the core to the 'debug' core in order to flush the line.
 *
 * @assoc: Association (or way) to flush
 * @index: Index to flush
 */
void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index)
{
	union cvmx_l2c_dbg l2cdbg;

	l2cdbg.u64 = 0;
	l2cdbg.s.ppnum = cvmx_get_core_num();
	l2cdbg.s.finv = 1;
	l2cdbg.s.set = assoc;

	/*
	 * Enter debug mode, and make sure all other writes complete
	 * before we enter debug mode.
	 */
	asm volatile ("sync" : : : "memory");
	cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
	cvmx_read_csr(CVMX_L2C_DBG);

	CVMX_PREPARE_FOR_STORE(((1ULL << 63) + (index) * 128), 0);

	/* Exit debug mode */
	asm volatile ("sync" : : : "memory");
	cvmx_write_csr(CVMX_L2C_DBG, 0);
	cvmx_read_csr(CVMX_L2C_DBG);
}