@@ -94,7 +94,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host)
 		spin_unlock_irqrestore(&host->clk_lock, flags);
 		return;
 	}
-	mmc_claim_host(host);
+	mutex_lock(&host->clk_gate_mutex);
 	spin_lock_irqsave(&host->clk_lock, flags);
 	if (!host->clk_requests) {
 		spin_unlock_irqrestore(&host->clk_lock, flags);
@@ -104,7 +104,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host)
 		pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
 	}
 	spin_unlock_irqrestore(&host->clk_lock, flags);
-	mmc_release_host(host);
+	mutex_unlock(&host->clk_gate_mutex);
 }
 
 /*
@@ -130,7 +130,7 @@ void mmc_host_clk_ungate(struct mmc_host *host)
 {
 	unsigned long flags;
 
-	mmc_claim_host(host);
+	mutex_lock(&host->clk_gate_mutex);
 	spin_lock_irqsave(&host->clk_lock, flags);
 	if (host->clk_gated) {
 		spin_unlock_irqrestore(&host->clk_lock, flags);
@@ -140,7 +140,7 @@ void mmc_host_clk_ungate(struct mmc_host *host)
 	}
 	host->clk_requests++;
 	spin_unlock_irqrestore(&host->clk_lock, flags);
-	mmc_release_host(host);
+	mutex_unlock(&host->clk_gate_mutex);
 }
 
 /**
@@ -215,6 +215,7 @@ static inline void mmc_host_clk_init(struct mmc_host *host)
 	host->clk_gated = false;
 	INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
 	spin_lock_init(&host->clk_lock);
+	mutex_init(&host->clk_gate_mutex);
 }
 
 /**