@@ -63,13 +63,26 @@ struct blkif_common_response {
 
 /* i386 protocol version */
 #pragma pack(push, 4)
+
+struct blkif_x86_32_request_rw {
+	blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
+	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+};
+
+struct blkif_x86_32_request_discard {
+	blkif_sector_t sector_number;/* start sector idx on disk */
+	uint64_t nr_sectors;
+};
+
 struct blkif_x86_32_request {
 	uint8_t operation; /* BLKIF_OP_??? */
 	uint8_t nr_segments; /* number of segments */
 	blkif_vdev_t handle; /* only for read/write requests */
 	uint64_t id; /* private guest value, echoed in resp */
-	blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
-	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+	union {
+		struct blkif_x86_32_request_rw rw;
+		struct blkif_x86_32_request_discard discard;
+	} u;
 };
 struct blkif_x86_32_response {
 	uint64_t id; /* copied from request */
@@ -79,13 +92,26 @@ struct blkif_x86_32_response {
 #pragma pack(pop)
 
 /* x86_64 protocol version */
+
+struct blkif_x86_64_request_rw {
+	blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
+	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+};
+
+struct blkif_x86_64_request_discard {
+	blkif_sector_t sector_number;/* start sector idx on disk */
+	uint64_t nr_sectors;
+};
+
 struct blkif_x86_64_request {
 	uint8_t operation; /* BLKIF_OP_??? */
 	uint8_t nr_segments; /* number of segments */
 	blkif_vdev_t handle; /* only for read/write requests */
 	uint64_t __attribute__((__aligned__(8))) id;
-	blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
-	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+	union {
+		struct blkif_x86_64_request_rw rw;
+		struct blkif_x86_64_request_discard discard;
+	} u;
 };
 struct blkif_x86_64_response {
 	uint64_t __attribute__((__aligned__(8))) id;
@@ -113,6 +139,11 @@ enum blkif_protocol {
 	BLKIF_PROTOCOL_X86_64 = 3,
 };
 
+enum blkif_backend_type {
+	BLKIF_BACKEND_PHY = 1,
+	BLKIF_BACKEND_FILE = 2,
+};
+
 struct xen_vbd {
 	/* What the domain refers to this vbd as. */
 	blkif_vdev_t handle;
@@ -138,6 +169,7 @@ struct xen_blkif {
 	unsigned int irq;
 	/* Comms information. */
 	enum blkif_protocol blk_protocol;
+	enum blkif_backend_type blk_backend_type;
 	union blkif_back_rings blk_rings;
 	struct vm_struct *blk_ring_area;
 	/* The VBD attached to this interface. */
@@ -159,6 +191,7 @@ struct xen_blkif {
 	int st_wr_req;
 	int st_oo_req;
 	int st_f_req;
+	int st_ds_req;
 	int st_rd_sect;
 	int st_wr_sect;
 
@@ -182,7 +215,7 @@ struct xen_blkif {
 
 struct phys_req {
 	unsigned short dev;
-	unsigned short nr_sects;
+	blkif_sector_t nr_sects;
 	struct block_device *bdev;
 	blkif_sector_t sector_number;
 };
@@ -206,12 +239,25 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
 	dst->nr_segments = src->nr_segments;
 	dst->handle = src->handle;
 	dst->id = src->id;
-	dst->u.rw.sector_number = src->sector_number;
-	barrier();
-	if (n > dst->nr_segments)
-		n = dst->nr_segments;
-	for (i = 0; i < n; i++)
-		dst->u.rw.seg[i] = src->seg[i];
+	switch (src->operation) {
+	case BLKIF_OP_READ:
+	case BLKIF_OP_WRITE:
+	case BLKIF_OP_WRITE_BARRIER:
+	case BLKIF_OP_FLUSH_DISKCACHE:
+		dst->u.rw.sector_number = src->u.rw.sector_number;
+		barrier();
+		if (n > dst->nr_segments)
+			n = dst->nr_segments;
+		for (i = 0; i < n; i++)
+			dst->u.rw.seg[i] = src->u.rw.seg[i];
+		break;
+	case BLKIF_OP_DISCARD:
+		dst->u.discard.sector_number = src->u.discard.sector_number;
+		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
+		break;
+	default:
+		break;
+	}
 }
 
 static inline void blkif_get_x86_64_req(struct blkif_request *dst,
@@ -222,12 +268,25 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
 	dst->nr_segments = src->nr_segments;
 	dst->handle = src->handle;
 	dst->id = src->id;
-	dst->u.rw.sector_number = src->sector_number;
-	barrier();
-	if (n > dst->nr_segments)
-		n = dst->nr_segments;
-	for (i = 0; i < n; i++)
-		dst->u.rw.seg[i] = src->seg[i];
+	switch (src->operation) {
+	case BLKIF_OP_READ:
+	case BLKIF_OP_WRITE:
+	case BLKIF_OP_WRITE_BARRIER:
+	case BLKIF_OP_FLUSH_DISKCACHE:
+		dst->u.rw.sector_number = src->u.rw.sector_number;
+		barrier();
+		if (n > dst->nr_segments)
+			n = dst->nr_segments;
+		for (i = 0; i < n; i++)
+			dst->u.rw.seg[i] = src->u.rw.seg[i];
+		break;
+	case BLKIF_OP_DISCARD:
+		dst->u.discard.sector_number = src->u.discard.sector_number;
+		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
+		break;
+	default:
+		break;
+	}
 }
 
 #endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */
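
For illustration only, not part of the patch: once the read/write and discard payloads are split into the union, a backend dispatcher is expected to pick the arm that matches the operation, mirroring the switch added to the blkif_get_x86_*_req() helpers above. The sketch below assumes that context; handle_rw() and handle_discard() are hypothetical placeholders, and only the field names and the st_ds_req counter come from the structures in this diff.

/* Hypothetical helpers, declared only to keep this sketch self-contained. */
static int handle_rw(struct xen_blkif *blkif, blkif_sector_t start,
		     struct blkif_request_segment *seg, unsigned int nseg);
static int handle_discard(struct xen_blkif *blkif, blkif_sector_t start,
			  uint64_t nr_sectors);

static int dispatch_native_request(struct xen_blkif *blkif,
				   struct blkif_request *req)
{
	switch (req->operation) {
	case BLKIF_OP_READ:
	case BLKIF_OP_WRITE:
		/* r/w requests: start sector and segment list live in u.rw */
		return handle_rw(blkif, req->u.rw.sector_number,
				 req->u.rw.seg, req->nr_segments);
	case BLKIF_OP_DISCARD:
		/* discard requests: only a sector range, carried in u.discard */
		blkif->st_ds_req++;
		return handle_discard(blkif, req->u.discard.sector_number,
				      req->u.discard.nr_sectors);
	default:
		/* flush/barrier and unknown operations are not handled in this sketch */
		return -EINVAL;
	}
}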