@@ -39,6 +39,10 @@ static DEFINE_SPINLOCK(dca_lock);
 
 static LIST_HEAD(dca_domains);
 
+static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);
+
+static int dca_providers_blocked;
+
 static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -70,6 +74,64 @@ static void dca_free_domain(struct dca_domain *domain)
 	kfree(domain);
 }
 
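+/* identify Intel I/OAT ver.3.0 providers by their PCI device IDs (TBG0..TBG7) */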
+static int dca_provider_ioat_ver_3_0(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
+		((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
+		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
+		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
+		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
+		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
+		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
+		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
+		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
+}
+
+static void unregister_dca_providers(void)
+{
+	struct dca_provider *dca, *_dca;
+	struct list_head unregistered_providers;
+	struct dca_domain *domain;
+	unsigned long flags;
+
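+	/* let all clients know their providers are about to be removed */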
+	blocking_notifier_call_chain(&dca_provider_chain,
+				     DCA_PROVIDER_REMOVE, NULL);
+
+	INIT_LIST_HEAD(&unregistered_providers);
+
+	spin_lock_irqsave(&dca_lock, flags);
+
+	if (list_empty(&dca_domains)) {
+		spin_unlock_irqrestore(&dca_lock, flags);
+		return;
+	}
+
+	/* at this point only one domain in the list is expected */
+	domain = list_first_entry(&dca_domains, struct dca_domain, node);
+	if (!domain) {
+		spin_unlock_irqrestore(&dca_lock, flags);
+		return;
+	}
+
+	list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node) {
+		list_del(&dca->node);
+		list_add(&dca->node, &unregistered_providers);
+	}
+
+	dca_free_domain(domain);
+
+	spin_unlock_irqrestore(&dca_lock, flags);
+
+	list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
+		dca_sysfs_remove_provider(dca);
+		list_del(&dca->node);
+	}
+}
+
 static struct dca_domain *dca_find_domain(struct pci_bus *rc)
 {
 	struct dca_domain *domain;
@@ -90,9 +152,14 @@ static struct dca_domain *dca_get_domain(struct device *dev)
 	domain = dca_find_domain(rc);
 
 	if (!domain) {
-		domain = dca_allocate_domain(rc);
-		if (domain)
-			list_add(&domain->node, &dca_domains);
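+		/* IOAT ver.3.0 provider on a second root complex: DCA unsupported, block all providers */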
+		if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) {
+			dca_providers_blocked = 1;
+		} else {
+			domain = dca_allocate_domain(rc);
+			if (domain)
+				list_add(&domain->node, &dca_domains);
+		}
 	}
 
 	return domain;
@@ -293,8 +360,6 @@ void free_dca_provider(struct dca_provider *dca)
 }
 EXPORT_SYMBOL_GPL(free_dca_provider);
 
-static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);
-
 /**
  * register_dca_provider - register a dca provider
  * @dca - struct created by alloc_dca_provider()
@@ -306,6 +371,14 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
 	unsigned long flags;
 	struct dca_domain *domain;
 
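+	/* refuse new providers once DCA has been blocked on this platform */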
+	spin_lock_irqsave(&dca_lock, flags);
+	if (dca_providers_blocked) {
+		spin_unlock_irqrestore(&dca_lock, flags);
+		return -ENODEV;
+	}
+	spin_unlock_irqrestore(&dca_lock, flags);
+
 	err = dca_sysfs_add_provider(dca, dev);
 	if (err)
 		return err;
@@ -313,7 +386,14 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
 	spin_lock_irqsave(&dca_lock, flags);
 	domain = dca_get_domain(dev);
 	if (!domain) {
-		spin_unlock_irqrestore(&dca_lock, flags);
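+		/* blocking was flagged in dca_get_domain(): unwind this provider and drop the rest */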
+		if (dca_providers_blocked) {
+			spin_unlock_irqrestore(&dca_lock, flags);
+			dca_sysfs_remove_provider(dca);
+			unregister_dca_providers();
+		} else {
+			spin_unlock_irqrestore(&dca_lock, flags);
+		}
 		return -ENODEV;
 	}
 	list_add(&dca->node, &domain->dca_providers);