/*
 * scm_blk.h - Storage class memory (SCM) block device driver,
 * shared definitions.
 */
#ifndef SCM_BLK_H
#define SCM_BLK_H

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/list.h>

#include <asm/debug.h>
#include <asm/eadm.h>

/* Minors reserved per SCM gendisk, i.e. the partition limit -- TODO confirm
 * against the alloc_disk() call in scm_blk.c. */
#define SCM_NR_PARTS 8
/* Requeue/retry delay; presumably jiffies for a delayed kick of the request
 * queue -- NOTE(review): verify unit at the use site. */
#define SCM_QUEUE_DELAY 5
/*
 * Per-device state of one SCM block device: the gendisk/request-queue pair
 * exposed to the block layer plus the underlying EADM SCM device.
 */
struct scm_blk_dev {
	struct tasklet_struct tasklet;	/* bottom half; presumably processes
					 * finished_requests -- confirm in
					 * scm_blk.c */
	struct request_queue *rq;	/* block layer request queue */
	struct gendisk *gendisk;	/* disk presented to the block layer */
	struct scm_device *scmdev;	/* underlying EADM SCM device */
	spinlock_t rq_lock;	/* guard the request queue */
	spinlock_t lock;	/* guard the rest of the blockdev */
	atomic_t queued_reqs;	/* requests currently queued/in flight --
				 * TODO confirm exact accounting */
	struct list_head finished_requests;	/* completed requests awaiting
						 * postprocessing */
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
	struct list_head cluster_list;	/* clusters tracked for write
					 * serialization -- see the cluster
					 * helpers below */
#endif
};
/*
 * One I/O request against an SCM block device, tying a block layer
 * struct request to its EADM operation block.
 */
struct scm_request {
	struct scm_blk_dev *bdev;	/* owning block device */
	struct request *request;	/* block layer request being serviced */
	struct aidaw *aidaw;		/* data address list for the AOB
					 * (types from asm/eadm.h) */
	struct aob *aob;		/* asynchronous operation block */
	struct list_head list;		/* presumably links into
					 * bdev->finished_requests -- confirm */
	u8 retries;			/* retry budget left for this request --
					 * TODO confirm decrement semantics */
	int error;			/* completion status; 0 on success */
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
	struct {
		/* Cluster involvement of this request; NONE when the request
		 * does not need cluster handling. */
		enum {CLUSTER_NONE, CLUSTER_READ, CLUSTER_WRITE} state;
		struct list_head list;	/* link on bdev->cluster_list --
					 * NOTE(review): verify */
		void **buf;		/* cluster bounce buffers -- semantics
					 * defined in scm_blk_cluster.c */
	} cluster;
#endif
};
  41. #define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data)
  42. int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *);
  43. void scm_blk_dev_cleanup(struct scm_blk_dev *);
  44. void scm_blk_irq(struct scm_device *, void *, int);
  45. void scm_request_finish(struct scm_request *);
  46. void scm_request_requeue(struct scm_request *);
  47. int scm_drv_init(void);
  48. void scm_drv_cleanup(void);
  49. #ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
  50. void __scm_free_rq_cluster(struct scm_request *);
  51. int __scm_alloc_rq_cluster(struct scm_request *);
  52. void scm_request_cluster_init(struct scm_request *);
  53. bool scm_reserve_cluster(struct scm_request *);
  54. void scm_release_cluster(struct scm_request *);
  55. void scm_blk_dev_cluster_setup(struct scm_blk_dev *);
  56. bool scm_need_cluster_request(struct scm_request *);
  57. void scm_initiate_cluster_request(struct scm_request *);
  58. void scm_cluster_request_irq(struct scm_request *);
  59. bool scm_test_cluster_request(struct scm_request *);
  60. bool scm_cluster_size_valid(void);
  61. #else
  62. #define __scm_free_rq_cluster(scmrq) {}
  63. #define __scm_alloc_rq_cluster(scmrq) 0
  64. #define scm_request_cluster_init(scmrq) {}
  65. #define scm_reserve_cluster(scmrq) true
  66. #define scm_release_cluster(scmrq) {}
  67. #define scm_blk_dev_cluster_setup(bdev) {}
  68. #define scm_need_cluster_request(scmrq) false
  69. #define scm_initiate_cluster_request(scmrq) {}
  70. #define scm_cluster_request_irq(scmrq) {}
  71. #define scm_test_cluster_request(scmrq) false
  72. #define scm_cluster_size_valid() true
  73. #endif
/* s390 debug feature area shared by all SCM debug logging below. */
extern debug_info_t *scm_debug;

/*
 * Log a short text event at importance level @imp.  Wrapped in
 * do { } while (0) so the macro behaves as a single statement.
 */
#define SCM_LOG(imp, txt) do { \
	debug_text_event(scm_debug, imp, txt); \
} while (0)
  78. static inline void SCM_LOG_HEX(int level, void *data, int length)
  79. {
  80. if (level > scm_debug->level)
  81. return;
  82. while (length > 0) {
  83. debug_event(scm_debug, level, data, length);
  84. length -= scm_debug->buf_size;
  85. data += scm_debug->buf_size;
  86. }
  87. }
  88. static inline void SCM_LOG_STATE(int level, struct scm_device *scmdev)
  89. {
  90. struct {
  91. u64 address;
  92. u8 oper_state;
  93. u8 rank;
  94. } __packed data = {
  95. .address = scmdev->address,
  96. .oper_state = scmdev->attrs.oper_state,
  97. .rank = scmdev->attrs.rank,
  98. };
  99. SCM_LOG_HEX(level, &data, sizeof(data));
  100. }
#endif /* SCM_BLK_H */