hardwall.c

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <linux/cdev.h>
#include <linux/compat.h>
#include <asm/hardwall.h>
#include <asm/traps.h>
#include <asm/siginfo.h>
#include <asm/irq_regs.h>

#include <arch/interrupts.h>
#include <arch/spr_def.h>


/*
 * Implement a per-cpu "hardwall" resource class such as UDN or IPI.
 * We use "hardwall" nomenclature throughout for historical reasons.
 * The lock here controls access to the list data structure as well as
 * to the items on the list.
 */
struct hardwall_type {
	int index;
	int is_xdn;
	int is_idn;
	int disabled;
	const char *name;
	struct list_head list;
	spinlock_t lock;
	struct proc_dir_entry *proc_dir;
};

enum hardwall_index {
	HARDWALL_UDN = 0,
#ifndef __tilepro__
	HARDWALL_IDN = 1,
	HARDWALL_IPI = 2,
#endif
	_HARDWALL_TYPES
};
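
/*
 * Each entry below uses positional initializers in the field order of
 * struct hardwall_type above: index, is_xdn, is_idn, disabled, name,
 * list, lock, proc_dir.
 */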
static struct hardwall_type hardwall_types[] = {
	{  /* user-space access to UDN */
		0,
		1,
		0,
		0,
		"udn",
		LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list),
		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_UDN].lock),
		NULL
	},
#ifndef __tilepro__
	{  /* user-space access to IDN */
		1,
		1,
		1,
		1,  /* disabled pending hypervisor support */
		"idn",
		LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list),
		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IDN].lock),
		NULL
	},
	{  /* access to user-space IPI */
		2,
		0,
		0,
		0,
		"ipi",
		LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list),
		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IPI].lock),
		NULL
	},
#endif
};
/*
 * This data structure tracks the cpu data, etc., associated
 * one-to-one with a "struct file *" from opening a hardwall device file.
 * Note that the file's private data points back to this structure.
 */
struct hardwall_info {
	struct list_head list;		/* for hardwall_types.list */
	struct list_head task_head;	/* head of tasks in this hardwall */
	struct hardwall_type *type;	/* type of this resource */
	struct cpumask cpumask;		/* cpus reserved */
	int id;				/* integer id for this hardwall */
	int teardown_in_progress;	/* are we tearing this one down? */

	/* Remaining fields only valid for user-network resources. */
	int ulhc_x;			/* upper left hand corner x coord */
	int ulhc_y;			/* upper left hand corner y coord */
	int width;			/* rectangle width */
	int height;			/* rectangle height */
#if CHIP_HAS_REV1_XDN()
	atomic_t xdn_pending_count;	/* cores in phase 1 of drain */
#endif
};


/* /proc/tile/hardwall */
static struct proc_dir_entry *hardwall_proc_dir;

/* Functions to manage files in /proc/tile/hardwall. */
static void hardwall_add_proc(struct hardwall_info *);
static void hardwall_remove_proc(struct hardwall_info *);

/* Allow disabling UDN access. */
static int __init noudn(char *str)
{
	pr_info("User-space UDN access is disabled\n");
	hardwall_types[HARDWALL_UDN].disabled = 1;
	return 0;
}
early_param("noudn", noudn);

#ifndef __tilepro__
/* Allow disabling IDN access. */
static int __init noidn(char *str)
{
	pr_info("User-space IDN access is disabled\n");
	hardwall_types[HARDWALL_IDN].disabled = 1;
	return 0;
}
early_param("noidn", noidn);

/* Allow disabling IPI access. */
static int __init noipi(char *str)
{
	pr_info("User-space IPI access is disabled\n");
	hardwall_types[HARDWALL_IPI].disabled = 1;
	return 0;
}
early_param("noipi", noipi);
#endif
/*
 * Low-level primitives for UDN/IDN
 */

#ifdef __tilepro__
#define mtspr_XDN(hwt, name, val) \
	do { (void)(hwt); __insn_mtspr(SPR_UDN_##name, (val)); } while (0)
#define mtspr_MPL_XDN(hwt, name, val) \
	do { (void)(hwt); __insn_mtspr(SPR_MPL_UDN_##name, (val)); } while (0)
#define mfspr_XDN(hwt, name) \
	((void)(hwt), __insn_mfspr(SPR_UDN_##name))
#else
#define mtspr_XDN(hwt, name, val) \
	do { \
		if ((hwt)->is_idn) \
			__insn_mtspr(SPR_IDN_##name, (val)); \
		else \
			__insn_mtspr(SPR_UDN_##name, (val)); \
	} while (0)
#define mtspr_MPL_XDN(hwt, name, val) \
	do { \
		if ((hwt)->is_idn) \
			__insn_mtspr(SPR_MPL_IDN_##name, (val)); \
		else \
			__insn_mtspr(SPR_MPL_UDN_##name, (val)); \
	} while (0)
#define mfspr_XDN(hwt, name) \
	((hwt)->is_idn ? __insn_mfspr(SPR_IDN_##name) : __insn_mfspr(SPR_UDN_##name))
#endif

/* Set a CPU bit if the CPU is online. */
#define cpu_online_set(cpu, dst) do { \
	if (cpu_online(cpu)) \
		cpumask_set_cpu(cpu, dst); \
} while (0)
/* Does the given rectangle contain the given x,y coordinate? */
static int contains(struct hardwall_info *r, int x, int y)
{
	return (x >= r->ulhc_x && x < r->ulhc_x + r->width) &&
		(y >= r->ulhc_y && y < r->ulhc_y + r->height);
}

/* Compute the rectangle parameters and validate the cpumask. */
static int check_rectangle(struct hardwall_info *r, struct cpumask *mask)
{
	int x, y, cpu, ulhc, lrhc;

	/* The first cpu is the ULHC, the last the LRHC. */
	ulhc = find_first_bit(cpumask_bits(mask), nr_cpumask_bits);
	lrhc = find_last_bit(cpumask_bits(mask), nr_cpumask_bits);

	/* Compute the rectangle attributes from the cpus. */
	r->ulhc_x = cpu_x(ulhc);
	r->ulhc_y = cpu_y(ulhc);
	r->width = cpu_x(lrhc) - r->ulhc_x + 1;
	r->height = cpu_y(lrhc) - r->ulhc_y + 1;

	/* Width and height must be positive. */
	if (r->width <= 0 || r->height <= 0)
		return -EINVAL;

	/* Confirm that the cpumask is exactly the rectangle. */
	for (y = 0, cpu = 0; y < smp_height; ++y)
		for (x = 0; x < smp_width; ++x, ++cpu)
			if (cpumask_test_cpu(cpu, mask) != contains(r, x, y))
				return -EINVAL;

	/*
	 * Note that offline cpus can't be drained when this user network
	 * rectangle eventually closes.  We used to detect this
	 * situation and print a warning, but it annoyed users and
	 * they ignored it anyway, so now we just return without a
	 * warning.
	 */
	return 0;
}
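
/*
 * Illustrative example (assuming an 8x8 mesh, i.e. smp_width == smp_height
 * == 8, where cpu == y * smp_width + x): a cpumask containing cpus
 * {9, 10, 17, 18} has ULHC cpu 9 at (1,1) and LRHC cpu 18 at (2,2), so
 * check_rectangle() yields ulhc_x = 1, ulhc_y = 1, width = 2, height = 2,
 * and the mask matches the rectangle exactly.  Adding, say, cpu 11 at
 * (3,1) would fail the exact-match scan above and return -EINVAL.
 */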
/*
 * Hardware management of hardwall setup, teardown, trapping,
 * and enabling/disabling PL0 access to the networks.
 */

/* Bit field values to mask together for writes to SPR_XDN_DIRECTION_PROTECT */
enum direction_protect {
	N_PROTECT = (1 << 0),
	E_PROTECT = (1 << 1),
	S_PROTECT = (1 << 2),
	W_PROTECT = (1 << 3),
	C_PROTECT = (1 << 4),
};
static inline int xdn_which_interrupt(struct hardwall_type *hwt)
{
#ifndef __tilepro__
	if (hwt->is_idn)
		return INT_IDN_FIREWALL;
#endif
	return INT_UDN_FIREWALL;
}

static void enable_firewall_interrupts(struct hardwall_type *hwt)
{
	arch_local_irq_unmask_now(xdn_which_interrupt(hwt));
}

static void disable_firewall_interrupts(struct hardwall_type *hwt)
{
	arch_local_irq_mask_now(xdn_which_interrupt(hwt));
}

/* Set up hardwall on this cpu based on the passed hardwall_info. */
static void hardwall_setup_func(void *info)
{
	struct hardwall_info *r = info;
	struct hardwall_type *hwt = r->type;

	int cpu = smp_processor_id();
	int x = cpu % smp_width;
	int y = cpu / smp_width;
	int bits = 0;
	if (x == r->ulhc_x)
		bits |= W_PROTECT;
	if (x == r->ulhc_x + r->width - 1)
		bits |= E_PROTECT;
	if (y == r->ulhc_y)
		bits |= N_PROTECT;
	if (y == r->ulhc_y + r->height - 1)
		bits |= S_PROTECT;
	BUG_ON(bits == 0);
	mtspr_XDN(hwt, DIRECTION_PROTECT, bits);
	enable_firewall_interrupts(hwt);
}
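
/*
 * Only cpus on the perimeter of the rectangle are handed to
 * hardwall_setup_func() (see hardwall_protect_rectangle() below), so each
 * call sets at least one of the N/E/S/W bits, and a corner tile sets two.
 * The BUG_ON(bits == 0) above therefore catches an interior cpu being
 * asked to protect itself, which would indicate a bug in the edge
 * computation.
 */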
/* Set up all cpus on edge of rectangle to enable/disable hardwall SPRs. */
static void hardwall_protect_rectangle(struct hardwall_info *r)
{
	int x, y, cpu, delta;
	struct cpumask rect_cpus;

	cpumask_clear(&rect_cpus);

	/* First include the top and bottom edges */
	cpu = r->ulhc_y * smp_width + r->ulhc_x;
	delta = (r->height - 1) * smp_width;
	for (x = 0; x < r->width; ++x, ++cpu) {
		cpu_online_set(cpu, &rect_cpus);
		cpu_online_set(cpu + delta, &rect_cpus);
	}

	/* Then the left and right edges */
	cpu -= r->width;
	delta = r->width - 1;
	for (y = 0; y < r->height; ++y, cpu += smp_width) {
		cpu_online_set(cpu, &rect_cpus);
		cpu_online_set(cpu + delta, &rect_cpus);
	}

	/* Then tell all the cpus to set up their protection SPR */
	on_each_cpu_mask(&rect_cpus, hardwall_setup_func, r, 1);
}
void __kprobes do_hardwall_trap(struct pt_regs *regs, int fault_num)
{
	struct hardwall_info *rect;
	struct hardwall_type *hwt;
	struct task_struct *p;
	struct siginfo info;
	int cpu = smp_processor_id();
	int found_processes;
	unsigned long flags;
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();

	/* Figure out which network trapped. */
	switch (fault_num) {
#ifndef __tilepro__
	case INT_IDN_FIREWALL:
		hwt = &hardwall_types[HARDWALL_IDN];
		break;
#endif
	case INT_UDN_FIREWALL:
		hwt = &hardwall_types[HARDWALL_UDN];
		break;
	default:
		BUG();
	}
	BUG_ON(hwt->disabled);

	/* This tile trapped a network access; find the rectangle. */
	spin_lock_irqsave(&hwt->lock, flags);
	list_for_each_entry(rect, &hwt->list, list) {
		if (cpumask_test_cpu(cpu, &rect->cpumask))
			break;
	}

	/*
	 * It shouldn't be possible not to find this cpu on the
	 * rectangle list, since only cpus in rectangles get hardwalled.
	 * The hardwall is only removed after the user network is drained.
	 */
	BUG_ON(&rect->list == &hwt->list);

	/*
	 * If we already started teardown on this hardwall, don't worry;
	 * the abort signal has been sent and we are just waiting for things
	 * to quiesce.
	 */
	if (rect->teardown_in_progress) {
		pr_notice("cpu %d: detected %s hardwall violation %#lx"
			  " while teardown already in progress\n",
			  cpu, hwt->name,
			  (long)mfspr_XDN(hwt, DIRECTION_PROTECT));
		goto done;
	}

	/*
	 * Kill off any process that is activated in this rectangle.
	 * We bypass security to deliver the signal, since it must be
	 * one of the activated processes that generated the user network
	 * message that caused this trap, and all the activated
	 * processes shared a single open file so are pretty tightly
	 * bound together from a security point of view to begin with.
	 */
	rect->teardown_in_progress = 1;
	wmb(); /* Ensure visibility of rectangle before notifying processes. */
	pr_notice("cpu %d: detected %s hardwall violation %#lx...\n",
		  cpu, hwt->name, (long)mfspr_XDN(hwt, DIRECTION_PROTECT));
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_HARDWALL;
	found_processes = 0;
	list_for_each_entry(p, &rect->task_head,
			    thread.hardwall[hwt->index].list) {
		BUG_ON(p->thread.hardwall[hwt->index].info != rect);
		if (!(p->flags & PF_EXITING)) {
			found_processes = 1;
			pr_notice("hardwall: killing %d\n", p->pid);
			do_send_sig_info(info.si_signo, &info, p, false);
		}
	}
	if (!found_processes)
		pr_notice("hardwall: no associated processes!\n");

 done:
	spin_unlock_irqrestore(&hwt->lock, flags);

	/*
	 * We have to disable firewall interrupts now, or else when we
	 * return from this handler, we will simply re-interrupt back to
	 * it.  However, we can't clear the protection bits, since we
	 * haven't yet drained the network, and that would allow packets
	 * to cross out of the hardwall region.
	 */
	disable_firewall_interrupts(hwt);

	irq_exit();
	set_irq_regs(old_regs);
}
/* Allow access from user space to the user network. */
void grant_hardwall_mpls(struct hardwall_type *hwt)
{
#ifndef __tilepro__
	if (!hwt->is_xdn) {
		__insn_mtspr(SPR_MPL_IPI_0_SET_0, 1);
		return;
	}
#endif
	mtspr_MPL_XDN(hwt, ACCESS_SET_0, 1);
	mtspr_MPL_XDN(hwt, AVAIL_SET_0, 1);
	mtspr_MPL_XDN(hwt, COMPLETE_SET_0, 1);
	mtspr_MPL_XDN(hwt, TIMER_SET_0, 1);
#if !CHIP_HAS_REV1_XDN()
	mtspr_MPL_XDN(hwt, REFILL_SET_0, 1);
	mtspr_MPL_XDN(hwt, CA_SET_0, 1);
#endif
}

/* Deny access from user space to the user network. */
void restrict_hardwall_mpls(struct hardwall_type *hwt)
{
#ifndef __tilepro__
	if (!hwt->is_xdn) {
		__insn_mtspr(SPR_MPL_IPI_0_SET_1, 1);
		return;
	}
#endif
	mtspr_MPL_XDN(hwt, ACCESS_SET_1, 1);
	mtspr_MPL_XDN(hwt, AVAIL_SET_1, 1);
	mtspr_MPL_XDN(hwt, COMPLETE_SET_1, 1);
	mtspr_MPL_XDN(hwt, TIMER_SET_1, 1);
#if !CHIP_HAS_REV1_XDN()
	mtspr_MPL_XDN(hwt, REFILL_SET_1, 1);
	mtspr_MPL_XDN(hwt, CA_SET_1, 1);
#endif
}

/* Restrict or deny as necessary for the task we're switching to. */
void hardwall_switch_tasks(struct task_struct *prev,
			   struct task_struct *next)
{
	int i;
	for (i = 0; i < HARDWALL_TYPES; ++i) {
		if (prev->thread.hardwall[i].info != NULL) {
			if (next->thread.hardwall[i].info == NULL)
				restrict_hardwall_mpls(&hardwall_types[i]);
		} else if (next->thread.hardwall[i].info != NULL) {
			grant_hardwall_mpls(&hardwall_types[i]);
		}
	}
}

/* Does this task have the right to IPI the given cpu? */
int hardwall_ipi_valid(int cpu)
{
#ifdef __tilegx__
	struct hardwall_info *info =
		current->thread.hardwall[HARDWALL_IPI].info;
	return info && cpumask_test_cpu(cpu, &info->cpumask);
#else
	return 0;
#endif
}
/*
 * Code to create, activate, deactivate, and destroy hardwall resources.
 */

/* Create a hardwall for the given resource */
static struct hardwall_info *hardwall_create(struct hardwall_type *hwt,
					     size_t size,
					     const unsigned char __user *bits)
{
	struct hardwall_info *iter, *info;
	struct cpumask mask;
	unsigned long flags;
	int rc;

	/* Reject crazy sizes out of hand, a la sys_mbind(). */
	if (size > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* Copy whatever fits into a cpumask. */
	if (copy_from_user(&mask, bits, min(sizeof(struct cpumask), size)))
		return ERR_PTR(-EFAULT);

	/*
	 * If the size was short, clear the rest of the mask;
	 * otherwise validate that the rest of the user mask was zero
	 * (we don't try hard to be efficient when validating huge masks).
	 */
	if (size < sizeof(struct cpumask)) {
		memset((char *)&mask + size, 0, sizeof(struct cpumask) - size);
	} else if (size > sizeof(struct cpumask)) {
		size_t i;
		for (i = sizeof(struct cpumask); i < size; ++i) {
			char c;
			if (get_user(c, &bits[i]))
				return ERR_PTR(-EFAULT);
			if (c)
				return ERR_PTR(-EINVAL);
		}
	}

	/* Allocate a new hardwall_info optimistically. */
	info = kmalloc(sizeof(struct hardwall_info),
			GFP_KERNEL | __GFP_ZERO);
	if (info == NULL)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&info->task_head);
	info->type = hwt;

	/* Compute the rectangle size and validate that it's plausible. */
	cpumask_copy(&info->cpumask, &mask);
	info->id = find_first_bit(cpumask_bits(&mask), nr_cpumask_bits);
	if (hwt->is_xdn) {
		rc = check_rectangle(info, &mask);
		if (rc != 0) {
			kfree(info);
			return ERR_PTR(rc);
		}
	}

	/* Confirm it doesn't overlap and add it to the list. */
	spin_lock_irqsave(&hwt->lock, flags);
	list_for_each_entry(iter, &hwt->list, list) {
		if (cpumask_intersects(&iter->cpumask, &info->cpumask)) {
			spin_unlock_irqrestore(&hwt->lock, flags);
			kfree(info);
			return ERR_PTR(-EBUSY);
		}
	}
	list_add_tail(&info->list, &hwt->list);
	spin_unlock_irqrestore(&hwt->lock, flags);

	/* Set up appropriate hardwalling on all affected cpus. */
	if (hwt->is_xdn)
		hardwall_protect_rectangle(info);

	/* Create a /proc/tile/hardwall entry. */
	hardwall_add_proc(info);
	return info;
}
/* Activate a given hardwall on this cpu for this process. */
static int hardwall_activate(struct hardwall_info *info)
{
	int cpu;
	unsigned long flags;
	struct task_struct *p = current;
	struct thread_struct *ts = &p->thread;
	struct hardwall_type *hwt;

	/* Require a hardwall. */
	if (info == NULL)
		return -ENODATA;

	/* Not allowed to activate a hardwall that is being torn down. */
	if (info->teardown_in_progress)
		return -EINVAL;

	/*
	 * Get our affinity; if we're not bound to this tile uniquely,
	 * we can't access the network registers.
	 */
	if (cpumask_weight(&p->cpus_allowed) != 1)
		return -EPERM;

	/* Make sure we are bound to a cpu assigned to this resource. */
	cpu = smp_processor_id();
	BUG_ON(cpumask_first(&p->cpus_allowed) != cpu);
	if (!cpumask_test_cpu(cpu, &info->cpumask))
		return -EINVAL;

	/* If we are already bound to this hardwall, it's a no-op. */
	hwt = info->type;
	if (ts->hardwall[hwt->index].info) {
		BUG_ON(ts->hardwall[hwt->index].info != info);
		return 0;
	}

	/* Success!  This process gets to use the resource on this cpu. */
	ts->hardwall[hwt->index].info = info;
	spin_lock_irqsave(&hwt->lock, flags);
	list_add(&ts->hardwall[hwt->index].list, &info->task_head);
	spin_unlock_irqrestore(&hwt->lock, flags);
	grant_hardwall_mpls(hwt);
	printk(KERN_DEBUG "Pid %d (%s) activated for %s hardwall: cpu %d\n",
	       p->pid, p->comm, hwt->name, cpu);
	return 0;
}
/*
 * Deactivate a task's hardwall.  Must hold lock for hardwall_type.
 * This method may be called from free_task(), so we don't want to
 * rely on too many fields of struct task_struct still being valid.
 * We assume the cpus_allowed, pid, and comm fields are still valid.
 */
static void _hardwall_deactivate(struct hardwall_type *hwt,
				 struct task_struct *task)
{
	struct thread_struct *ts = &task->thread;

	if (cpumask_weight(&task->cpus_allowed) != 1) {
		pr_err("pid %d (%s) releasing %s hardwall with"
		       " an affinity mask containing %d cpus!\n",
		       task->pid, task->comm, hwt->name,
		       cpumask_weight(&task->cpus_allowed));
		BUG();
	}

	BUG_ON(ts->hardwall[hwt->index].info == NULL);
	ts->hardwall[hwt->index].info = NULL;
	list_del(&ts->hardwall[hwt->index].list);
	if (task == current)
		restrict_hardwall_mpls(hwt);
}

/* Deactivate a task's hardwall. */
static int hardwall_deactivate(struct hardwall_type *hwt,
			       struct task_struct *task)
{
	unsigned long flags;
	int activated;

	spin_lock_irqsave(&hwt->lock, flags);
	activated = (task->thread.hardwall[hwt->index].info != NULL);
	if (activated)
		_hardwall_deactivate(hwt, task);
	spin_unlock_irqrestore(&hwt->lock, flags);

	if (!activated)
		return -EINVAL;

	printk(KERN_DEBUG "Pid %d (%s) deactivated for %s hardwall: cpu %d\n",
	       task->pid, task->comm, hwt->name, smp_processor_id());
	return 0;
}

void hardwall_deactivate_all(struct task_struct *task)
{
	int i;
	for (i = 0; i < HARDWALL_TYPES; ++i)
		if (task->thread.hardwall[i].info)
			hardwall_deactivate(&hardwall_types[i], task);
}
/* Stop the switch before draining the network. */
static void stop_xdn_switch(void *arg)
{
#if !CHIP_HAS_REV1_XDN()
	/* Freeze the switch and the demux. */
	__insn_mtspr(SPR_UDN_SP_FREEZE,
		     SPR_UDN_SP_FREEZE__SP_FRZ_MASK |
		     SPR_UDN_SP_FREEZE__DEMUX_FRZ_MASK |
		     SPR_UDN_SP_FREEZE__NON_DEST_EXT_MASK);
#else
	/*
	 * Drop all packets bound for the core or off the edge.
	 * We rely on the normal hardwall protection setup code
	 * to have set the low four bits to trigger firewall interrupts,
	 * and shift those bits up to trigger "drop on send" semantics,
	 * plus adding "drop on send to core" for all switches.
	 * In practice it seems the switches latch the DIRECTION_PROTECT
	 * SPR so they won't start dropping if they're already
	 * delivering the last message to the core, but it doesn't
	 * hurt to enable it here.
	 */
	struct hardwall_type *hwt = arg;
	unsigned long protect = mfspr_XDN(hwt, DIRECTION_PROTECT);
	mtspr_XDN(hwt, DIRECTION_PROTECT, (protect | C_PROTECT) << 5);
#endif
}

static void empty_xdn_demuxes(struct hardwall_type *hwt)
{
#ifndef __tilepro__
	if (hwt->is_idn) {
		while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 0))
			(void) __tile_idn0_receive();
		while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 1))
			(void) __tile_idn1_receive();
		return;
	}
#endif
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0))
		(void) __tile_udn0_receive();
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1))
		(void) __tile_udn1_receive();
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2))
		(void) __tile_udn2_receive();
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3))
		(void) __tile_udn3_receive();
}
/* Drain all the state from a stopped switch. */
static void drain_xdn_switch(void *arg)
{
	struct hardwall_info *info = arg;
	struct hardwall_type *hwt = info->type;

#if CHIP_HAS_REV1_XDN()
	/*
	 * The switches have been configured to drop any messages
	 * destined for cores (or off the edge of the rectangle).
	 * But the current message may continue to be delivered,
	 * so we wait until all the cores have finished any pending
	 * messages before we stop draining.
	 */
	int pending = mfspr_XDN(hwt, PENDING);
	while (pending--) {
		empty_xdn_demuxes(hwt);
		if (hwt->is_idn)
			__tile_idn_send(0);
		else
			__tile_udn_send(0);
	}
	atomic_dec(&info->xdn_pending_count);
	while (atomic_read(&info->xdn_pending_count))
		empty_xdn_demuxes(hwt);
#else
	int i;
	int from_tile_words, ca_count;

	/* Empty out the 5 switch point fifos. */
	for (i = 0; i < 5; i++) {
		int words, j;
		__insn_mtspr(SPR_UDN_SP_FIFO_SEL, i);
		words = __insn_mfspr(SPR_UDN_SP_STATE) & 0xF;
		for (j = 0; j < words; j++)
			(void) __insn_mfspr(SPR_UDN_SP_FIFO_DATA);
		BUG_ON((__insn_mfspr(SPR_UDN_SP_STATE) & 0xF) != 0);
	}

	/* Dump out the 3 word fifo at top. */
	from_tile_words = (__insn_mfspr(SPR_UDN_DEMUX_STATUS) >> 10) & 0x3;
	for (i = 0; i < from_tile_words; i++)
		(void) __insn_mfspr(SPR_UDN_DEMUX_WRITE_FIFO);

	/* Empty out demuxes. */
	empty_xdn_demuxes(hwt);

	/* Empty out catch all. */
	ca_count = __insn_mfspr(SPR_UDN_DEMUX_CA_COUNT);
	for (i = 0; i < ca_count; i++)
		(void) __insn_mfspr(SPR_UDN_CA_DATA);
	BUG_ON(__insn_mfspr(SPR_UDN_DEMUX_CA_COUNT) != 0);

	/* Clear demux logic. */
	__insn_mtspr(SPR_UDN_DEMUX_CTL, 1);

	/*
	 * Write switch state; experimentation indicates that 0xc3000
	 * is an idle switch point.
	 */
	for (i = 0; i < 5; i++) {
		__insn_mtspr(SPR_UDN_SP_FIFO_SEL, i);
		__insn_mtspr(SPR_UDN_SP_STATE, 0xc3000);
	}
#endif
}
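
/*
 * In the CHIP_HAS_REV1_XDN() path above, the drain runs in two phases:
 * each core empties its demuxes and sends a null word for each message it
 * still has pending, then decrements xdn_pending_count (which
 * hardwall_destroy() sets to the number of participating cpus) and keeps
 * emptying its demuxes until every core has finished phase one.  This acts
 * as a barrier so no core stops draining while others can still inject
 * traffic toward it.
 */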
/* Reset random XDN state registers at boot up and during hardwall teardown. */
static void reset_xdn_network_state(struct hardwall_type *hwt)
{
	if (hwt->disabled)
		return;

	/* Clear out other random registers so we have a clean slate. */
	mtspr_XDN(hwt, DIRECTION_PROTECT, 0);
	mtspr_XDN(hwt, AVAIL_EN, 0);
	mtspr_XDN(hwt, DEADLOCK_TIMEOUT, 0);

#if !CHIP_HAS_REV1_XDN()
	/* Reset UDN coordinates to their standard value */
	{
		unsigned int cpu = smp_processor_id();
		unsigned int x = cpu % smp_width;
		unsigned int y = cpu / smp_width;
		__insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));
	}

	/* Set demux tags to predefined values and enable them. */
	__insn_mtspr(SPR_UDN_TAG_VALID, 0xf);
	__insn_mtspr(SPR_UDN_TAG_0, (1 << 0));
	__insn_mtspr(SPR_UDN_TAG_1, (1 << 1));
	__insn_mtspr(SPR_UDN_TAG_2, (1 << 2));
	__insn_mtspr(SPR_UDN_TAG_3, (1 << 3));

	/* Set other rev0 random registers to a clean state. */
	__insn_mtspr(SPR_UDN_REFILL_EN, 0);
	__insn_mtspr(SPR_UDN_DEMUX_QUEUE_SEL, 0);
	__insn_mtspr(SPR_UDN_SP_FIFO_SEL, 0);

	/* Start the switch and demux. */
	__insn_mtspr(SPR_UDN_SP_FREEZE, 0);
#endif
}

void reset_network_state(void)
{
	reset_xdn_network_state(&hardwall_types[HARDWALL_UDN]);
#ifndef __tilepro__
	reset_xdn_network_state(&hardwall_types[HARDWALL_IDN]);
#endif
}

/* Restart an XDN switch after draining. */
static void restart_xdn_switch(void *arg)
{
	struct hardwall_type *hwt = arg;

#if CHIP_HAS_REV1_XDN()
	/* One last drain step to avoid races with injection and draining. */
	empty_xdn_demuxes(hwt);
#endif

	reset_xdn_network_state(hwt);

	/* Disable firewall interrupts. */
	disable_firewall_interrupts(hwt);
}
/* Last reference to a hardwall is gone, so clear the network. */
static void hardwall_destroy(struct hardwall_info *info)
{
	struct task_struct *task;
	struct hardwall_type *hwt;
	unsigned long flags;

	/* Make sure this file actually represents a hardwall. */
	if (info == NULL)
		return;

	/*
	 * Deactivate any remaining tasks.  It's possible to race with
	 * some other thread that is exiting and hasn't yet called
	 * deactivate (when freeing its thread_info), so we carefully
	 * deactivate any remaining tasks before freeing the
	 * hardwall_info object itself.
	 */
	hwt = info->type;
	info->teardown_in_progress = 1;
	spin_lock_irqsave(&hwt->lock, flags);
	list_for_each_entry(task, &info->task_head,
			    thread.hardwall[hwt->index].list)
		_hardwall_deactivate(hwt, task);
	spin_unlock_irqrestore(&hwt->lock, flags);

	if (hwt->is_xdn) {
		/* Configure the switches for draining the user network. */
		printk(KERN_DEBUG
		       "Clearing %s hardwall rectangle %dx%d %d,%d\n",
		       hwt->name, info->width, info->height,
		       info->ulhc_x, info->ulhc_y);
		on_each_cpu_mask(&info->cpumask, stop_xdn_switch, hwt, 1);

		/* Drain the network. */
#if CHIP_HAS_REV1_XDN()
		atomic_set(&info->xdn_pending_count,
			   cpumask_weight(&info->cpumask));
		on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 0);
#else
		on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 1);
#endif

		/* Restart switch and disable firewall. */
		on_each_cpu_mask(&info->cpumask, restart_xdn_switch, hwt, 1);
	}

	/* Remove the /proc/tile/hardwall entry. */
	hardwall_remove_proc(info);

	/* Now free the hardwall from the list. */
	spin_lock_irqsave(&hwt->lock, flags);
	BUG_ON(!list_empty(&info->task_head));
	list_del(&info->list);
	spin_unlock_irqrestore(&hwt->lock, flags);
	kfree(info);
}
static int hardwall_proc_show(struct seq_file *sf, void *v)
{
	struct hardwall_info *info = sf->private;
	char buf[256];

	int rc = cpulist_scnprintf(buf, sizeof(buf), &info->cpumask);
	buf[rc++] = '\n';
	seq_write(sf, buf, rc);
	return 0;
}

static int hardwall_proc_open(struct inode *inode,
			      struct file *file)
{
	return single_open(file, hardwall_proc_show, PDE(inode)->data);
}

static const struct file_operations hardwall_proc_fops = {
	.open		= hardwall_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void hardwall_add_proc(struct hardwall_info *info)
{
	char buf[64];
	snprintf(buf, sizeof(buf), "%d", info->id);
	proc_create_data(buf, 0444, info->type->proc_dir,
			 &hardwall_proc_fops, info);
}

static void hardwall_remove_proc(struct hardwall_info *info)
{
	char buf[64];
	snprintf(buf, sizeof(buf), "%d", info->id);
	remove_proc_entry(buf, info->type->proc_dir);
}

int proc_pid_hardwall(struct task_struct *task, char *buffer)
{
	int i;
	int n = 0;

	for (i = 0; i < HARDWALL_TYPES; ++i) {
		struct hardwall_info *info = task->thread.hardwall[i].info;
		if (info)
			n += sprintf(&buffer[n], "%s: %d\n",
				     info->type->name, info->id);
	}
	return n;
}

void proc_tile_hardwall_init(struct proc_dir_entry *root)
{
	int i;
	for (i = 0; i < HARDWALL_TYPES; ++i) {
		struct hardwall_type *hwt = &hardwall_types[i];
		if (hwt->disabled)
			continue;
		if (hardwall_proc_dir == NULL)
			hardwall_proc_dir = proc_mkdir("hardwall", root);
		hwt->proc_dir = proc_mkdir(hwt->name, hardwall_proc_dir);
	}
}
/*
 * Character device support via ioctl/close.
 */

static long hardwall_ioctl(struct file *file, unsigned int a, unsigned long b)
{
	struct hardwall_info *info = file->private_data;
	int minor = iminor(file->f_mapping->host);
	struct hardwall_type *hwt;

	if (_IOC_TYPE(a) != HARDWALL_IOCTL_BASE)
		return -EINVAL;

	BUILD_BUG_ON(HARDWALL_TYPES != _HARDWALL_TYPES);
	BUILD_BUG_ON(HARDWALL_TYPES !=
		     sizeof(hardwall_types)/sizeof(hardwall_types[0]));
	if (minor < 0 || minor >= HARDWALL_TYPES)
		return -EINVAL;
	hwt = &hardwall_types[minor];
	WARN_ON(info && hwt != info->type);

	switch (_IOC_NR(a)) {
	case _HARDWALL_CREATE:
		if (hwt->disabled)
			return -ENOSYS;
		if (info != NULL)
			return -EALREADY;
		info = hardwall_create(hwt, _IOC_SIZE(a),
				       (const unsigned char __user *)b);
		if (IS_ERR(info))
			return PTR_ERR(info);
		file->private_data = info;
		return 0;

	case _HARDWALL_ACTIVATE:
		return hardwall_activate(info);

	case _HARDWALL_DEACTIVATE:
		if (current->thread.hardwall[hwt->index].info != info)
			return -EINVAL;
		return hardwall_deactivate(hwt, current);

	case _HARDWALL_GET_ID:
		return info ? info->id : -EINVAL;

	default:
		return -EINVAL;
	}
}
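
/*
 * Sketch of the expected user-space sequence (illustrative only; it assumes
 * HARDWALL_CREATE()/HARDWALL_ACTIVATE ioctl wrappers defined alongside
 * _HARDWALL_CREATE/_HARDWALL_ACTIVATE in <asm/hardwall.h>, and a device
 * node such as "/dev/hardwall/udn", a hypothetical path that udev or the
 * distribution must create -- this driver only allocates the char device
 * region):
 *
 *	int fd = open("/dev/hardwall/udn", O_RDWR);
 *	ioctl(fd, HARDWALL_CREATE(mask_size), cpumask_bits); // reserve cpus
 *	// bind this thread to exactly one cpu inside the reserved mask:
 *	sched_setaffinity(0, sizeof(one_cpu_mask), &one_cpu_mask);
 *	ioctl(fd, HARDWALL_ACTIVATE, 0);   // grant UDN access on this cpu
 *	... use the user network ...
 *	close(fd);   // flush/release: deactivate tasks, drain the network
 */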
#ifdef CONFIG_COMPAT
static long hardwall_compat_ioctl(struct file *file,
				  unsigned int a, unsigned long b)
{
	/* Sign-extend the argument so it can be used as a pointer. */
	return hardwall_ioctl(file, a, (unsigned long)compat_ptr(b));
}
#endif

/* The user process closed the file; revoke access to user networks. */
static int hardwall_flush(struct file *file, fl_owner_t owner)
{
	struct hardwall_info *info = file->private_data;
	struct task_struct *task, *tmp;
	unsigned long flags;

	if (info) {
		/*
		 * NOTE: if multiple threads are activated on this hardwall
		 * file, the other threads will continue having access to the
		 * user network until they are context-switched out and back
		 * in again.
		 *
		 * NOTE: A NULL files pointer means the task is being torn
		 * down, so in that case we also deactivate it.
		 */
		struct hardwall_type *hwt = info->type;
		spin_lock_irqsave(&hwt->lock, flags);
		list_for_each_entry_safe(task, tmp, &info->task_head,
					 thread.hardwall[hwt->index].list) {
			if (task->files == owner || task->files == NULL)
				_hardwall_deactivate(hwt, task);
		}
		spin_unlock_irqrestore(&hwt->lock, flags);
	}

	return 0;
}

/* This hardwall is gone, so destroy it. */
static int hardwall_release(struct inode *inode, struct file *file)
{
	hardwall_destroy(file->private_data);
	return 0;
}

static const struct file_operations dev_hardwall_fops = {
	.open		= nonseekable_open,
	.unlocked_ioctl	= hardwall_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= hardwall_compat_ioctl,
#endif
	.flush		= hardwall_flush,
	.release	= hardwall_release,
};

static struct cdev hardwall_dev;

static int __init dev_hardwall_init(void)
{
	int rc;
	dev_t dev;

	rc = alloc_chrdev_region(&dev, 0, HARDWALL_TYPES, "hardwall");
	if (rc < 0)
		return rc;
	cdev_init(&hardwall_dev, &dev_hardwall_fops);
	rc = cdev_add(&hardwall_dev, dev, HARDWALL_TYPES);
	if (rc < 0)
		return rc;

	return 0;
}
late_initcall(dev_hardwall_init);