pda.h

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
 */
#ifndef _ASM_IA64_SN_PDA_H
#define _ASM_IA64_SN_PDA_H

#include <linux/cache.h>
#include <asm/percpu.h>
#include <asm/system.h>

/*
 * CPU-specific data structure.
 *
 * One of these structures is allocated for each cpu of a NUMA system.
 *
 * This structure provides a convenient way of keeping together
 * all SN per-cpu data structures.
 */
typedef struct pda_s {

	/*
	 * Support for SN LEDs
	 */
	volatile short		*led_address;
	u8			led_state;
	u8			hb_state;	/* supports blinking heartbeat leds */
	unsigned int		hb_count;

	unsigned int		idle_flag;

	volatile unsigned long	*bedrock_rev_id;
	volatile unsigned long	*pio_write_status_addr;
	unsigned long		pio_write_status_val;
	volatile unsigned long	*pio_shub_war_cam_addr;
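
	/*
	 * Annotation (not in the original header): sn_soft_irr and
	 * sn_in_service_ivecs appear to be per-vector bitmaps; 4 * 64 bits
	 * covers the 256 ia64 interrupt vectors.
	 */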
	unsigned long		sn_soft_irr[4];
	unsigned long		sn_in_service_ivecs[4];
	int			sn_lb_int_war_ticks;
	int			sn_last_irq;
	int			sn_first_irq;
} pda_t;

#define CACHE_ALIGN(x)	(((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
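/*
 * Worked example (annotation, not in the original header): with
 * SMP_CACHE_BYTES == 128, CACHE_ALIGN(200) == (200 + 127) & ~127 == 256,
 * i.e. sizes are rounded up to the next cache-line multiple.
 */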

/*
 * PDA
 * Per-cpu private data area for each cpu. The PDA is located immediately after
 * the IA64 cpu_data area. A full page is allocated for the cpu_data area for each
 * cpu but only a small amount of the page is actually used. We put the SNIA PDA
 * in the same page as the cpu_data area. Note that there is a check in the setup
 * code to verify that we don't overflow the page.
 *
 * It seems like we should cache-line align the pda so that any changes in the
 * size of the cpu_data area don't change the cache layout. Should we align to a
 * 32, 64, 128, or 512 byte boundary? Each has merits. For now, pick 128, but this
 * should be revisited later.
 */
DECLARE_PER_CPU(struct pda_s, pda_percpu);

#define pda		(&__ia64_per_cpu_var(pda_percpu))
#define pdacpu(cpu)	(&per_cpu(pda_percpu, cpu))
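
/*
 * Usage sketch (annotation, not in the original header): "cpu" and "irq"
 * below are hypothetical local variables used only for illustration.
 *
 *	pda->idle_flag = 1;		   PDA of the executing cpu
 *	pdacpu(cpu)->sn_last_irq = irq;	   PDA of an arbitrary cpu
 */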

#endif /* _ASM_IA64_SN_PDA_H */