@@ -35,7 +35,7 @@ MODULE_VERSION(DCA_VERSION);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Intel Corporation");
 
-static DEFINE_SPINLOCK(dca_lock);
+static DEFINE_RAW_SPINLOCK(dca_lock);
 
 static LIST_HEAD(dca_domains);
@@ -101,10 +101,10 @@ static void unregister_dca_providers(void)
 
 	INIT_LIST_HEAD(&unregistered_providers);
 
-	spin_lock_irqsave(&dca_lock, flags);
+	raw_spin_lock_irqsave(&dca_lock, flags);
 
 	if (list_empty(&dca_domains)) {
-		spin_unlock_irqrestore(&dca_lock, flags);
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
 		return;
 	}
 
@@ -116,7 +116,7 @@ static void unregister_dca_providers(void)
 
 	dca_free_domain(domain);
 
-	spin_unlock_irqrestore(&dca_lock, flags);
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
 
 	list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
 		dca_sysfs_remove_provider(dca);
@@ -144,13 +144,8 @@ static struct dca_domain *dca_get_domain(struct device *dev)
 	domain = dca_find_domain(rc);
 
 	if (!domain) {
-		if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) {
+		if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
 			dca_providers_blocked = 1;
-		} else {
-			domain = dca_allocate_domain(rc);
-			if (domain)
-				list_add(&domain->node, &dca_domains);
-		}
 	}
 
 	return domain;
@@ -198,19 +193,19 @@ int dca_add_requester(struct device *dev)
 	if (!dev)
 		return -EFAULT;
 
-	spin_lock_irqsave(&dca_lock, flags);
+	raw_spin_lock_irqsave(&dca_lock, flags);
 
 	/* check if the requester has not been added already */
 	dca = dca_find_provider_by_dev(dev);
 	if (dca) {
-		spin_unlock_irqrestore(&dca_lock, flags);
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
 		return -EEXIST;
 	}
 
 	pci_rc = dca_pci_rc_from_dev(dev);
 	domain = dca_find_domain(pci_rc);
 	if (!domain) {
-		spin_unlock_irqrestore(&dca_lock, flags);
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
 		return -ENODEV;
 	}
 
@@ -220,17 +215,17 @@ int dca_add_requester(struct device *dev)
 			break;
 	}
 
-	spin_unlock_irqrestore(&dca_lock, flags);
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
 
 	if (slot < 0)
 		return slot;
 
 	err = dca_sysfs_add_req(dca, dev, slot);
 	if (err) {
-		spin_lock_irqsave(&dca_lock, flags);
+		raw_spin_lock_irqsave(&dca_lock, flags);
 		if (dca == dca_find_provider_by_dev(dev))
 			dca->ops->remove_requester(dca, dev);
-		spin_unlock_irqrestore(&dca_lock, flags);
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
 		return err;
 	}
 
@@ -251,14 +246,14 @@ int dca_remove_requester(struct device *dev)
 	if (!dev)
 		return -EFAULT;
 
-	spin_lock_irqsave(&dca_lock, flags);
+	raw_spin_lock_irqsave(&dca_lock, flags);
 	dca = dca_find_provider_by_dev(dev);
 	if (!dca) {
-		spin_unlock_irqrestore(&dca_lock, flags);
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
 		return -ENODEV;
 	}
 	slot = dca->ops->remove_requester(dca, dev);
-	spin_unlock_irqrestore(&dca_lock, flags);
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
 
 	if (slot < 0)
 		return slot;
@@ -280,16 +275,16 @@ u8 dca_common_get_tag(struct device *dev, int cpu)
 	u8 tag;
 	unsigned long flags;
 
-	spin_lock_irqsave(&dca_lock, flags);
+	raw_spin_lock_irqsave(&dca_lock, flags);
 
 	dca = dca_find_provider_by_dev(dev);
 	if (!dca) {
-		spin_unlock_irqrestore(&dca_lock, flags);
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
 		return -ENODEV;
 	}
 	tag = dca->ops->get_tag(dca, dev, cpu);
 
-	spin_unlock_irqrestore(&dca_lock, flags);
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
 	return tag;
 }
 
@@ -360,36 +355,51 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
 {
 	int err;
 	unsigned long flags;
-	struct dca_domain *domain;
+	struct dca_domain *domain, *newdomain = NULL;
 
-	spin_lock_irqsave(&dca_lock, flags);
+	raw_spin_lock_irqsave(&dca_lock, flags);
 	if (dca_providers_blocked) {
-		spin_unlock_irqrestore(&dca_lock, flags);
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
 		return -ENODEV;
 	}
-	spin_unlock_irqrestore(&dca_lock, flags);
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
 
 	err = dca_sysfs_add_provider(dca, dev);
 	if (err)
 		return err;
 
-	spin_lock_irqsave(&dca_lock, flags);
+	raw_spin_lock_irqsave(&dca_lock, flags);
 	domain = dca_get_domain(dev);
 	if (!domain) {
+		struct pci_bus *rc;
+
 		if (dca_providers_blocked) {
-			spin_unlock_irqrestore(&dca_lock, flags);
+			raw_spin_unlock_irqrestore(&dca_lock, flags);
 			dca_sysfs_remove_provider(dca);
 			unregister_dca_providers();
-		} else {
-			spin_unlock_irqrestore(&dca_lock, flags);
+			return -ENODEV;
+		}
+
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
+		rc = dca_pci_rc_from_dev(dev);
+		newdomain = dca_allocate_domain(rc);
+		if (!newdomain)
+			return -ENODEV;
+		raw_spin_lock_irqsave(&dca_lock, flags);
+		/* Recheck, we might have raced after dropping the lock */
+		domain = dca_get_domain(dev);
+		if (!domain) {
+			domain = newdomain;
+			newdomain = NULL;
+			list_add(&domain->node, &dca_domains);
 		}
-		return -ENODEV;
 	}
 	list_add(&dca->node, &domain->dca_providers);
-	spin_unlock_irqrestore(&dca_lock, flags);
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
 
 	blocking_notifier_call_chain(&dca_provider_chain,
 				     DCA_PROVIDER_ADD, NULL);
+	kfree(newdomain);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(register_dca_provider);
@@ -407,7 +417,7 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
 	blocking_notifier_call_chain(&dca_provider_chain,
 				     DCA_PROVIDER_REMOVE, NULL);
 
-	spin_lock_irqsave(&dca_lock, flags);
+	raw_spin_lock_irqsave(&dca_lock, flags);
 
 	list_del(&dca->node);
 
@@ -416,7 +426,7 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
 	if (list_empty(&domain->dca_providers))
 		dca_free_domain(domain);
 
-	spin_unlock_irqrestore(&dca_lock, flags);
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
 
 	dca_sysfs_remove_provider(dca);
 }