@@ -29,7 +29,6 @@
 #include <linux/mm.h>
 #include <linux/time.h>
 #include <linux/list.h>
-#include <linux/semaphore.h>
 
 #include "omap_dmm_tiler.h"
 #include "omap_dmm_priv.h"
@@ -120,6 +119,18 @@ static int wait_status(struct refill_engine *engine, uint32_t wait_mask)
 	return 0;
 }
 
+static void release_engine(struct refill_engine *engine)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&list_lock, flags);
+	list_add(&engine->idle_node, &omap_dmm->idle_head);
+	spin_unlock_irqrestore(&list_lock, flags);
+
+	atomic_inc(&omap_dmm->engine_counter);
+	wake_up_interruptible(&omap_dmm->engine_queue);
+}
+
 static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
 {
 	struct dmm *dmm = arg;
@@ -130,9 +141,13 @@ static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
 	writel(status, dmm->base + DMM_PAT_IRQSTATUS);
 
 	for (i = 0; i < dmm->num_engines; i++) {
-		if (status & DMM_IRQSTAT_LST)
+		if (status & DMM_IRQSTAT_LST) {
 			wake_up_interruptible(&dmm->engines[i].wait_for_refill);
 
+			if (dmm->engines[i].async)
+				release_engine(&dmm->engines[i]);
+		}
+
 		status >>= 8;
 	}
 
@@ -146,17 +161,24 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
 {
 	struct dmm_txn *txn = NULL;
 	struct refill_engine *engine = NULL;
+	int ret;
+	unsigned long flags;
 
-	down(&dmm->engine_sem);
+
+	/* wait until an engine is available */
+	ret = wait_event_interruptible(omap_dmm->engine_queue,
+			atomic_add_unless(&omap_dmm->engine_counter, -1, 0));
+	if (ret)
+		return ERR_PTR(ret);
 
 	/* grab an idle engine */
-	spin_lock(&list_lock);
+	spin_lock_irqsave(&list_lock, flags);
 	if (!list_empty(&dmm->idle_head)) {
 		engine = list_entry(dmm->idle_head.next, struct refill_engine,
 				idle_node);
 		list_del(&engine->idle_node);
 	}
-	spin_unlock(&list_lock);
+	spin_unlock_irqrestore(&list_lock, flags);
 
 	BUG_ON(!engine);
 
@@ -174,7 +196,7 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
  * Add region to DMM transaction. If pages or pages[i] is NULL, then the
  * corresponding slot is cleared (ie. dummy_pa is programmed)
  */
-static int dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
+static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
 		struct page **pages, uint32_t npages, uint32_t roll)
 {
 	dma_addr_t pat_pa = 0;
@@ -208,7 +230,7 @@ static int dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
 
 	txn->last_pat = pat;
 
-	return 0;
+	return;
 }
 
 /**
@@ -238,6 +260,9 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
 		goto cleanup;
 	}
 
+	/* mark whether it is async to denote list management in IRQ handler */
+	engine->async = wait ? false : true;
+
 	/* kick reload */
 	writel(engine->refill_pa,
 		dmm->base + reg[PAT_DESCR][engine->id]);
@@ -252,11 +277,10 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
 	}
 
 cleanup:
-	spin_lock(&list_lock);
-	list_add(&engine->idle_node, &dmm->idle_head);
-	spin_unlock(&list_lock);
+	/* only place engine back on list if we are done with it */
+	if (ret || wait)
+		release_engine(engine);
 
-	up(&omap_dmm->engine_sem);
 	return ret;
 }
 
@@ -280,16 +304,13 @@ static int fill(struct tcm_area *area, struct page **pages,
 			.x1 = slice.p1.x, .y1 = slice.p1.y,
 		};
 
-		ret = dmm_txn_append(txn, &p_area, pages, npages, roll);
-		if (ret)
-			goto fail;
+		dmm_txn_append(txn, &p_area, pages, npages, roll);
 
 		roll += tcm_sizeof(slice);
 	}
 
 	ret = dmm_txn_commit(txn, wait);
 
-fail:
 	return ret;
 }
 
@@ -326,6 +347,7 @@ struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
 	struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
 	u32 min_align = 128;
 	int ret;
+	unsigned long flags;
 
 	BUG_ON(!validfmt(fmt));
 
@@ -347,9 +369,9 @@ struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
 	}
 
 	/* add to allocation list */
-	spin_lock(&list_lock);
+	spin_lock_irqsave(&list_lock, flags);
 	list_add(&block->alloc_node, &omap_dmm->alloc_head);
-	spin_unlock(&list_lock);
+	spin_unlock_irqrestore(&list_lock, flags);
 
 	return block;
 }
@@ -358,6 +380,7 @@ struct tiler_block *tiler_reserve_1d(size_t size)
 {
 	struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
 	int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	unsigned long flags;
 
 	if (!block)
 		return ERR_PTR(-ENOMEM);
@@ -370,9 +393,9 @@ struct tiler_block *tiler_reserve_1d(size_t size)
 		return ERR_PTR(-ENOMEM);
 	}
 
-	spin_lock(&list_lock);
+	spin_lock_irqsave(&list_lock, flags);
 	list_add(&block->alloc_node, &omap_dmm->alloc_head);
-	spin_unlock(&list_lock);
+	spin_unlock_irqrestore(&list_lock, flags);
 
 	return block;
 }
@@ -381,13 +404,14 @@ struct tiler_block *tiler_reserve_1d(size_t size)
 int tiler_release(struct tiler_block *block)
 {
 	int ret = tcm_free(&block->area);
+	unsigned long flags;
 
 	if (block->area.tcm)
 		dev_err(omap_dmm->dev, "failed to release block\n");
 
-	spin_lock(&list_lock);
+	spin_lock_irqsave(&list_lock, flags);
 	list_del(&block->alloc_node);
-	spin_unlock(&list_lock);
+	spin_unlock_irqrestore(&list_lock, flags);
 
 	kfree(block);
 	return ret;
@@ -507,16 +531,17 @@ static int omap_dmm_remove(struct platform_device *dev)
 {
 	struct tiler_block *block, *_block;
 	int i;
+	unsigned long flags;
 
 	if (omap_dmm) {
 		/* free all area regions */
-		spin_lock(&list_lock);
+		spin_lock_irqsave(&list_lock, flags);
 		list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
 				alloc_node) {
 			list_del(&block->alloc_node);
 			kfree(block);
 		}
-		spin_unlock(&list_lock);
+		spin_unlock_irqrestore(&list_lock, flags);
 
 		for (i = 0; i < omap_dmm->num_lut; i++)
 			if (omap_dmm->tcm && omap_dmm->tcm[i])
@@ -560,6 +585,8 @@ static int omap_dmm_probe(struct platform_device *dev)
 	INIT_LIST_HEAD(&omap_dmm->alloc_head);
 	INIT_LIST_HEAD(&omap_dmm->idle_head);
 
+	init_waitqueue_head(&omap_dmm->engine_queue);
+
 	/* lookup hwmod data - base address and irq */
 	mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
 	if (!mem) {
@@ -588,6 +615,8 @@ static int omap_dmm_probe(struct platform_device *dev)
 	omap_dmm->container_width = 256;
 	omap_dmm->container_height = 128;
 
+	atomic_set(&omap_dmm->engine_counter, omap_dmm->num_engines);
+
 	/* read out actual LUT width and height */
 	pat_geom = readl(omap_dmm->base + DMM_PAT_GEOMETRY);
 	omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
@@ -651,7 +680,6 @@ static int omap_dmm_probe(struct platform_device *dev)
 		goto fail;
 	}
 
-	sema_init(&omap_dmm->engine_sem, omap_dmm->num_engines);
 	for (i = 0; i < omap_dmm->num_engines; i++) {
 		omap_dmm->engines[i].id = i;
 		omap_dmm->engines[i].dmm = omap_dmm;
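
---

For readers following the locking change, below is a minimal, self-contained sketch (not part of the patch) of the reservation pattern the patch switches to: a wait queue plus an atomic counter take over the counting-semaphore role, so that an engine can also be recycled from hard-IRQ context. The `engine_pool` wrapper, the `claim_engine()` name, and the standalone `list_lock` here are illustrative only; in the driver these fields live on `struct dmm`/`omap_dmm`, and the logic sits in `dmm_txn_init()` and `release_engine()`.

/*
 * Illustrative sketch of the wait-queue + atomic-counter scheme.
 * Names are hypothetical; see dmm_txn_init()/release_engine() above
 * for the driver's actual code.
 */
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static DEFINE_SPINLOCK(list_lock);

struct refill_engine {
	struct list_head idle_node;
	/* ... per-engine refill state ... */
};

struct engine_pool {
	wait_queue_head_t engine_queue;	/* waiters for a free engine */
	atomic_t engine_counter;	/* number of free engines */
	struct list_head idle_head;	/* list of idle engines */
};

/* Block (interruptibly) until an engine can be claimed. */
static struct refill_engine *claim_engine(struct engine_pool *pool)
{
	struct refill_engine *engine;
	unsigned long flags;
	int ret;

	/*
	 * atomic_add_unless() decrements the counter only if it is
	 * non-zero, so exactly one waiter wins each free engine.
	 */
	ret = wait_event_interruptible(pool->engine_queue,
			atomic_add_unless(&pool->engine_counter, -1, 0));
	if (ret)
		return ERR_PTR(ret);

	/* winning the counter guarantees the idle list is non-empty */
	spin_lock_irqsave(&list_lock, flags);
	engine = list_first_entry(&pool->idle_head,
			struct refill_engine, idle_node);
	list_del(&engine->idle_node);
	spin_unlock_irqrestore(&list_lock, flags);

	return engine;
}

/* Safe from both process and hard-IRQ context. */
static void release_engine(struct engine_pool *pool,
		struct refill_engine *engine)
{
	unsigned long flags;

	/* put the engine back before advertising it via the counter */
	spin_lock_irqsave(&list_lock, flags);
	list_add(&engine->idle_node, &pool->idle_head);
	spin_unlock_irqrestore(&list_lock, flags);

	atomic_inc(&pool->engine_counter);
	wake_up_interruptible(&pool->engine_queue);
}

Because release_engine() now runs from the IRQ handler for asynchronous commits, list_lock is taken in hard-IRQ context, and every other taker must use the _irqsave/_irqrestore variants to avoid deadlocking against the interrupt; that is exactly what the spin_lock() -> spin_lock_irqsave() conversions in the remaining hunks do.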