@@ -36,7 +36,6 @@
 
 /* Register offsets */
 #define MG_BUFF_OFFSET 0x8000
-#define MG_STORAGE_BUFFER_SIZE 0x200
 #define MG_REG_OFFSET 0xC000
 #define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */
 #define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */
@@ -477,9 +476,18 @@ static unsigned int mg_out(struct mg_host *host,
 	return MG_ERR_NONE;
 }
 
+static void mg_read_one(struct mg_host *host, struct request *req)
+{
+	u16 *buff = (u16 *)req->buffer;
+	u32 i;
+
+	for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
+		*buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
+				(i << 1));
+}
+
 static void mg_read(struct request *req)
 {
-	u32 j;
 	struct mg_host *host = req->rq_disk->private_data;
 
 	if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
@@ -490,26 +498,33 @@ static void mg_read(struct request *req)
 	       blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
 
 	do {
-		u16 *buff = (u16 *)req->buffer;
-
 		if (mg_wait(host, ATA_DRQ,
 				MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
 			mg_bad_rw_intr(host);
 			return;
 		}
-		for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
-			*buff++ = inw((unsigned long)host->dev_base +
-					MG_BUFF_OFFSET + (j << 1));
+
+		mg_read_one(host, req);
 
 		outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base +
 				MG_REG_COMMAND);
 	} while (mg_end_request(host, 0, MG_SECTOR_SIZE));
 }
 
+static void mg_write_one(struct mg_host *host, struct request *req)
+{
+	u16 *buff = (u16 *)req->buffer;
+	u32 i;
+
+	for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
+		outw(*buff++, (unsigned long)host->dev_base + MG_BUFF_OFFSET +
+				(i << 1));
+}
+
 static void mg_write(struct request *req)
 {
-	u32 j;
 	struct mg_host *host = req->rq_disk->private_data;
+	bool rem;
 
 	if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
 			MG_CMD_WR, NULL) != MG_ERR_NONE) {
@@ -520,27 +535,37 @@ static void mg_write(struct request *req)
 	MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
 	       blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
 
-	do {
-		u16 *buff = (u16 *)req->buffer;
+	if (mg_wait(host, ATA_DRQ,
+			MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
+		mg_bad_rw_intr(host);
+		return;
+	}
+
+	mg_write_one(host, req);
+
+	outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
 
-		if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
+	do {
+		if (blk_rq_sectors(req) > 1 &&
+				mg_wait(host, ATA_DRQ,
+					MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
 			mg_bad_rw_intr(host);
 			return;
 		}
-		for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
-			outw(*buff++, (unsigned long)host->dev_base +
-					MG_BUFF_OFFSET + (j << 1));
 
-		outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
-				MG_REG_COMMAND);
-	} while (mg_end_request(host, 0, MG_SECTOR_SIZE));
+		rem = mg_end_request(host, 0, MG_SECTOR_SIZE);
+		if (rem)
+			mg_write_one(host, req);
+
+		outb(MG_CMD_WR_CONF,
+				(unsigned long)host->dev_base + MG_REG_COMMAND);
+	} while (rem);
 }
 
 static void mg_read_intr(struct mg_host *host)
 {
 	struct request *req = host->req;
 	u32 i;
-	u16 *buff;
 
 	/* check status */
 	do {
@@ -558,13 +583,7 @@ static void mg_read_intr(struct mg_host *host)
 		return;
 
 ok_to_read:
-	/* get current segment of request */
-	buff = (u16 *)req->buffer;
-
-	/* read 1 sector */
-	for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
-		*buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
-				(i << 1));
+	mg_read_one(host, req);
 
 	MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
 	       blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer);
@@ -583,8 +602,7 @@ ok_to_read:
 static void mg_write_intr(struct mg_host *host)
 {
 	struct request *req = host->req;
-	u32 i, j;
-	u16 *buff;
+	u32 i;
 	bool rem;
 
 	/* check status */
@@ -605,12 +623,7 @@ static void mg_write_intr(struct mg_host *host)
 ok_to_write:
 	if ((rem = mg_end_request(host, 0, MG_SECTOR_SIZE))) {
 		/* write 1 sector and set handler if remains */
-		buff = (u16 *)req->buffer;
-		for (j = 0; j < MG_STORAGE_BUFFER_SIZE >> 1; j++) {
-			outw(*buff, (unsigned long)host->dev_base +
-					MG_BUFF_OFFSET + (j << 1));
-			buff++;
-		}
+		mg_write_one(host, req);
 		MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
 		       blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
 		host->mg_do_intr = mg_write_intr;
@@ -675,9 +688,6 @@ static unsigned int mg_issue_req(struct request *req,
 				 unsigned int sect_num,
 				 unsigned int sect_cnt)
 {
-	u16 *buff;
-	u32 i;
-
 	switch (rq_data_dir(req)) {
 	case READ:
 		if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr)
@@ -701,12 +711,7 @@ static unsigned int mg_issue_req(struct request *req,
 			mg_bad_rw_intr(host);
 			return host->error;
 		}
-		buff = (u16 *)req->buffer;
-		for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) {
-			outw(*buff, (unsigned long)host->dev_base +
-					MG_BUFF_OFFSET + (i << 1));
-			buff++;
-		}
+		mg_write_one(host, req);
 		mod_timer(&host->timer, jiffies + 3 * HZ);
 		outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
 				MG_REG_COMMAND);