
Merge master.kernel.org:/pub/scm/linux/kernel/git/gregkh/pci-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/gregkh/pci-2.6: (169 commits)
  commit 78a596b4490e17b9990d87b9d468ef5bb70daa10
  Author: Adrian Bunk <bunk@stusta.de>
  Date:   Fri Mar 31 01:38:12 2006 -0800
  
      [PATCH] remove kernel/power/pm.c:pm_unregister()
      
      Since the last user is removed in -mm, we can now remove this long deprecated
      function.
      
      Signed-off-by: Adrian Bunk <bunk@stusta.de>
      Cc: Pavel Machek <pavel@ucw.cz>
      Signed-off-by: Andrew Morton <akpm@osdl.org>
      Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
  
  commit 21440d313358043b0ce5e43b00ff3c9b35a8616c
  Author: David Brownell <david-b@pacbell.net>
  Date:   Sat Apr 1 10:21:52 2006 -0800
  
      [PATCH] dma doc updates
      
  ...
Linus Torvalds 19 years ago
Parent
Current commit
bcdc084257

+ 36 - 13
Documentation/DMA-API.txt

@@ -33,7 +33,9 @@ pci_alloc_consistent(struct pci_dev *dev, size_t size,
 
 
 Consistent memory is memory for which a write by either the device or
 the processor can immediately be read by the processor or device
-without having to worry about caching effects.
+without having to worry about caching effects.  (You may however need
+to make sure to flush the processor's write buffers before telling
+devices to read that memory.)
 
 This routine allocates a region of <size> bytes of consistent memory.
 It also returns a <dma_handle> which may be cast to an unsigned
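
For illustration, here is a minimal sketch of the new caveat in driver
code (not part of this patch; pdev, regs, MY_DOORBELL and the descriptor
layout are hypothetical):

	struct my_desc *desc;
	dma_addr_t desc_dma;

	/* Consistent memory needs no explicit cache management, but the
	 * CPU's write buffers may still hold the freshly written data. */
	desc = pci_alloc_consistent(pdev, sizeof(*desc), &desc_dma);
	if (!desc)
		return -ENOMEM;

	desc->cmd = MY_CMD_START;		/* CPU fills the descriptor */
	wmb();					/* flush CPU write buffers... */
	writel(desc_dma, regs + MY_DOORBELL);	/* ...before the device reads */
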
@@ -304,12 +306,12 @@ dma address with dma_mapping_error(). A non zero return value means the mapping
 could not be created and the driver should take appropriate action (eg
 reduce current DMA mapping usage or delay and try again later).
 
-int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	   enum dma_data_direction direction)
-int
-pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
-	   int nents, int direction)
+	int
+	dma_map_sg(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction direction)
+	int
+	pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
+		int nents, int direction)
 
 
 Maps a scatter gather list from the block layer.
 
@@ -327,12 +329,33 @@ critical that the driver do something, in the case of a block driver
 aborting the request or even oopsing is better than doing nothing and
 corrupting the filesystem.
 
-void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-	     enum dma_data_direction direction)
-void
-pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
-	     int nents, int direction)
+With scatterlists, you use the resulting mapping like this:
+
+	int i, count = dma_map_sg(dev, sglist, nents, direction);
+	struct scatterlist *sg;
+
+	for (i = 0, sg = sglist; i < count; i++, sg++) {
+		hw_address[i] = sg_dma_address(sg);
+		hw_len[i] = sg_dma_len(sg);
+	}
+
+where nents is the number of entries in the sglist.
+
+The implementation is free to merge several consecutive sglist entries
+into one (e.g. with an IOMMU, or if several pages just happen to be
+physically contiguous) and returns the actual number of sg entries it
+mapped them to. On failure, 0 is returned.
+
+Then you should loop count times (note: this can be less than nents times)
+and use sg_dma_address() and sg_dma_len() macros where you previously
+accessed sg->address and sg->length as shown above.
+
+	void
+	dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+		int nhwentries, enum dma_data_direction direction)
+	void
+	pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
+		int nents, int direction)
 
 
 Unmap the previously mapped scatter/gather list.  All the parameters
 must be the same as those passed in to the scatter/gather mapping
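
As a hedged usage sketch (not from this patch; hw and program_device_sg()
are hypothetical), the map and unmap calls pair up like this -- note that
dma_unmap_sg() takes the original nents, not the count dma_map_sg()
returned:

	int count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
	if (count == 0)
		return -ENOMEM;	/* mapping failed: back off and retry later */

	/* program the device using sg_dma_address()/sg_dma_len() */
	program_device_sg(hw, sglist, count);

	/* ... after the transfer has completed ... */
	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);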

+ 17 - 5
Documentation/DMA-mapping.txt

@@ -58,11 +58,15 @@ translating each of those pages back to a kernel address using
 something like __va().  [ EDIT: Update this when we integrate
 Gerd Knorr's generic code which does this. ]
 
-This rule also means that you may not use kernel image addresses
-(ie. items in the kernel's data/text/bss segment, or your driver's)
-nor may you use kernel stack addresses for DMA.  Both of these items
-might be mapped somewhere entirely different than the rest of physical
-memory.
+This rule also means that you may use neither kernel image addresses
+(items in data/text/bss segments), nor module image addresses, nor
+stack addresses for DMA.  These could all be mapped somewhere entirely
+different than the rest of physical memory.  Even if those classes of
+memory could physically work with DMA, you'd need to ensure the I/O
+buffers were cacheline-aligned.  Without that, you'd see cacheline
+sharing problems (data corruption) on CPUs with DMA-incoherent caches.
+(The CPU could write to one word, DMA would write to a different one
+in the same cache line, and one of them could be overwritten.)
 
 
 Also, this means that you cannot take the return of a kmap()
 call and DMA to/from that.  This is similar to vmalloc().
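
To make the rule concrete, a short sketch (assumed driver context, not
part of this patch): kmalloc() memory may be used for DMA, while stack
and image addresses may not:

	u8 stack_buf[64];			/* never DMA to/from stack memory */
	u8 *buf = kmalloc(64, GFP_KERNEL);	/* kmalloc() memory is DMA-able */

	if (!buf)
		return -ENOMEM;
	/* hand buf, not stack_buf, to the DMA mapping API */
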
@@ -284,6 +288,11 @@ There are two types of DMA mappings:
 
 
              in order to get correct behavior on all platforms.
 
+	     Also, on some platforms your driver may need to flush CPU write
+	     buffers in much the same way as it needs to flush write buffers
+	     found in PCI bridges (such as by reading a register's value
+	     after writing it).
+
 - Streaming DMA mappings which are usually mapped for one DMA transfer,
   unmapped right after it (unless you use pci_dma_sync_* below) and for which
   hardware can optimize for sequential accesses.
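
A common way to do that flush, sketched with hypothetical register names
(not part of this patch):

	writel(val, regs + MY_CTRL);	/* posted write may linger in a buffer */
	(void) readl(regs + MY_CTRL);	/* read-back forces it out to the device */
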
@@ -303,6 +312,9 @@ There are two types of DMA mappings:
 
 
 Neither type of DMA mapping has alignment restrictions that come
 from PCI, although some devices may have such restrictions.
+Also, systems with caches that aren't DMA-coherent will work better
+when the underlying buffers don't share cache lines with other data.
+
 
 
 		 Using Consistent DMA mappings.
 

+ 3 - 0
arch/i386/pci/irq.c

@@ -588,7 +588,10 @@ static __init int via_router_probe(struct irq_router *r,
 	case PCI_DEVICE_ID_VIA_82C596:
 	case PCI_DEVICE_ID_VIA_82C686:
 	case PCI_DEVICE_ID_VIA_8231:
+	case PCI_DEVICE_ID_VIA_8233A:
 	case PCI_DEVICE_ID_VIA_8235:
+	case PCI_DEVICE_ID_VIA_8237:
+	case PCI_DEVICE_ID_VIA_8237_SATA:
 		/* FIXME: add new ones for 8233/5 */
 		r->name = "VIA";
 		r->get = pirq_via_get;

+ 0 - 3
drivers/pci/hotplug/rpaphp_core.c

@@ -360,9 +360,6 @@ static int __init rpaphp_init(void)
 	while ((dn = of_find_node_by_type(dn, "pci")))
 		rpaphp_add_slot(dn);
 
-	if (!num_slots)
-		return -ENODEV;
-
 	return 0;
 }
 

+ 198 - 29
drivers/pci/msi.c

@@ -504,6 +504,201 @@ void pci_scan_msi_device(struct pci_dev *dev)
 		nr_reserved_vectors++;
 }
 
+#ifdef CONFIG_PM
+int pci_save_msi_state(struct pci_dev *dev)
+{
+	int pos, i = 0;
+	u16 control;
+	struct pci_cap_saved_state *save_state;
+	u32 *cap;
+
+	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+	if (pos <= 0 || dev->no_msi)
+		return 0;
+
+	pci_read_config_word(dev, msi_control_reg(pos), &control);
+	if (!(control & PCI_MSI_FLAGS_ENABLE))
+		return 0;
+
+	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5,
+		GFP_KERNEL);
+	if (!save_state) {
+		printk(KERN_ERR "Out of memory in pci_save_msi_state\n");
+		return -ENOMEM;
+	}
+	cap = &save_state->data[0];
+
+	pci_read_config_dword(dev, pos, &cap[i++]);
+	control = cap[0] >> 16;
+	pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &cap[i++]);
+	if (control & PCI_MSI_FLAGS_64BIT) {
+		pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &cap[i++]);
+		pci_read_config_dword(dev, pos + PCI_MSI_DATA_64, &cap[i++]);
+	} else
+		pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]);
+	if (control & PCI_MSI_FLAGS_MASKBIT)
+		pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]);
+	disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
+	save_state->cap_nr = PCI_CAP_ID_MSI;
+	pci_add_saved_cap(dev, save_state);
+	return 0;
+}
+
+void pci_restore_msi_state(struct pci_dev *dev)
+{
+	int i = 0, pos;
+	u16 control;
+	struct pci_cap_saved_state *save_state;
+	u32 *cap;
+
+	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI);
+	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+	if (!save_state || pos <= 0)
+		return;
+	cap = &save_state->data[0];
+
+	control = cap[i++] >> 16;
+	pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]);
+	if (control & PCI_MSI_FLAGS_64BIT) {
+		pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]);
+		pci_write_config_dword(dev, pos + PCI_MSI_DATA_64, cap[i++]);
+	} else
+		pci_write_config_dword(dev, pos + PCI_MSI_DATA_32, cap[i++]);
+	if (control & PCI_MSI_FLAGS_MASKBIT)
+		pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]);
+	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
+	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
+	pci_remove_saved_cap(save_state);
+	kfree(save_state);
+}
+
+int pci_save_msix_state(struct pci_dev *dev)
+{
+	int pos;
+	u16 control;
+	struct pci_cap_saved_state *save_state;
+
+	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+	if (pos <= 0 || dev->no_msi)
+		return 0;
+
+	pci_read_config_word(dev, msi_control_reg(pos), &control);
+	if (!(control & PCI_MSIX_FLAGS_ENABLE))
+		return 0;
+	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16),
+		GFP_KERNEL);
+	if (!save_state) {
+		printk(KERN_ERR "Out of memory in pci_save_msix_state\n");
+		return -ENOMEM;
+	}
+	*((u16 *)&save_state->data[0]) = control;
+
+	disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
+	save_state->cap_nr = PCI_CAP_ID_MSIX;
+	pci_add_saved_cap(dev, save_state);
+	return 0;
+}
+
+void pci_restore_msix_state(struct pci_dev *dev)
+{
+	u16 save;
+	int pos;
+	int vector, head, tail = 0;
+	void __iomem *base;
+	int j;
+	struct msg_address address;
+	struct msg_data data;
+	struct msi_desc *entry;
+	int temp;
+	struct pci_cap_saved_state *save_state;
+
+	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSIX);
+	if (!save_state)
+		return;
+	save = *((u16 *)&save_state->data[0]);
+	pci_remove_saved_cap(save_state);
+	kfree(save_state);
+
+	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+	if (pos <= 0)
+		return;
+
+	/* route the table */
+	temp = dev->irq;
+	if (msi_lookup_vector(dev, PCI_CAP_ID_MSIX))
+		return;
+	vector = head = dev->irq;
+	while (head != tail) {
+		entry = msi_desc[vector];
+		base = entry->mask_base;
+		j = entry->msi_attrib.entry_nr;
+
+		msi_address_init(&address);
+		msi_data_init(&data, vector);
+
+		address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
+		address.lo_address.value |= entry->msi_attrib.current_cpu <<
+					MSI_TARGET_CPU_SHIFT;
+
+		writel(address.lo_address.value,
+			base + j * PCI_MSIX_ENTRY_SIZE +
+			PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
+		writel(address.hi_address,
+			base + j * PCI_MSIX_ENTRY_SIZE +
+			PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
+		writel(*(u32*)&data,
+			base + j * PCI_MSIX_ENTRY_SIZE +
+			PCI_MSIX_ENTRY_DATA_OFFSET);
+
+		tail = msi_desc[vector]->link.tail;
+		vector = tail;
+	}
+	dev->irq = temp;
+
+	pci_write_config_word(dev, msi_control_reg(pos), save);
+	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
+}
+#endif
+
+static void msi_register_init(struct pci_dev *dev, struct msi_desc *entry)
+{
+	struct msg_address address;
+	struct msg_data data;
+	int pos, vector = dev->irq;
+	u16 control;
+
+	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+	pci_read_config_word(dev, msi_control_reg(pos), &control);
+	/* Configure MSI capability structure */
+	msi_address_init(&address);
+	msi_data_init(&data, vector);
+	entry->msi_attrib.current_cpu = ((address.lo_address.u.dest_id >>
+				MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
+	pci_write_config_dword(dev, msi_lower_address_reg(pos),
+			address.lo_address.value);
+	if (is_64bit_address(control)) {
+		pci_write_config_dword(dev,
+			msi_upper_address_reg(pos), address.hi_address);
+		pci_write_config_word(dev,
+			msi_data_reg(pos, 1), *((u32*)&data));
+	} else
+		pci_write_config_word(dev,
+			msi_data_reg(pos, 0), *((u32*)&data));
+	if (entry->msi_attrib.maskbit) {
+		unsigned int maskbits, temp;
+		/* All MSIs are unmasked by default, Mask them all */
+		pci_read_config_dword(dev,
+			msi_mask_bits_reg(pos, is_64bit_address(control)),
+			&maskbits);
+		temp = (1 << multi_msi_capable(control));
+		temp = ((temp - 1) & ~temp);
+		maskbits |= temp;
+		pci_write_config_dword(dev,
+			msi_mask_bits_reg(pos, is_64bit_address(control)),
+			maskbits);
+	}
+}
+
 /**
  * msi_capability_init - configure device's MSI capability structure
  * @dev: pointer to the pci_dev data structure of MSI device function
@@ -516,8 +711,6 @@ void pci_scan_msi_device(struct pci_dev *dev)
 static int msi_capability_init(struct pci_dev *dev)
 {
 	struct msi_desc *entry;
-	struct msg_address address;
-	struct msg_data data;
 	int pos, vector;
 	u16 control;
 
@@ -549,33 +742,8 @@ static int msi_capability_init(struct pci_dev *dev)
 	/* Replace with MSI handler */
 	irq_handler_init(PCI_CAP_ID_MSI, vector, entry->msi_attrib.maskbit);
 	/* Configure MSI capability structure */
-	msi_address_init(&address);
-	msi_data_init(&data, vector);
-	entry->msi_attrib.current_cpu = ((address.lo_address.u.dest_id >>
-				MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
-	pci_write_config_dword(dev, msi_lower_address_reg(pos),
-			address.lo_address.value);
-	if (is_64bit_address(control)) {
-		pci_write_config_dword(dev,
-			msi_upper_address_reg(pos), address.hi_address);
-		pci_write_config_word(dev,
-			msi_data_reg(pos, 1), *((u32*)&data));
-	} else
-		pci_write_config_word(dev,
-			msi_data_reg(pos, 0), *((u32*)&data));
-	if (entry->msi_attrib.maskbit) {
-		unsigned int maskbits, temp;
-		/* All MSIs are unmasked by default, Mask them all */
-		pci_read_config_dword(dev,
-			msi_mask_bits_reg(pos, is_64bit_address(control)),
-			&maskbits);
-		temp = (1 << multi_msi_capable(control));
-		temp = ((temp - 1) & ~temp);
-		maskbits |= temp;
-		pci_write_config_dword(dev,
-			msi_mask_bits_reg(pos, is_64bit_address(control)),
-			maskbits);
-	}
+	msi_register_init(dev, entry);
+
 	attach_msi_entry(entry, vector);
 	/* Set MSI enabled bits	 */
 	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
@@ -731,6 +899,7 @@ int pci_enable_msi(struct pci_dev* dev)
 			vector_irq[dev->irq] = -1;
 			nr_released_vectors--;
 			spin_unlock_irqrestore(&msi_lock, flags);
+			msi_register_init(dev, msi_desc[dev->irq]);
 			enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
 			return 0;
 		}

+ 6 - 0
drivers/pci/pci.c

@@ -446,6 +446,10 @@ pci_save_state(struct pci_dev *dev)
 	/* XXX: 100% dword access ok here? */
 	for (i = 0; i < 16; i++)
 		pci_read_config_dword(dev, i * 4,&dev->saved_config_space[i]);
+	if ((i = pci_save_msi_state(dev)) != 0)
+		return i;
+	if ((i = pci_save_msix_state(dev)) != 0)
+		return i;
 	return 0;
 }
 
@@ -460,6 +464,8 @@ pci_restore_state(struct pci_dev *dev)
 
 
 	for (i = 0; i < 16; i++)
 		pci_write_config_dword(dev,i * 4, dev->saved_config_space[i]);
+	pci_restore_msi_state(dev);
+	pci_restore_msix_state(dev);
 	return 0;
 }
 
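
With these hooks in place, a driver's ordinary suspend/resume path picks
up MSI/MSI-X state handling automatically. A hedged sketch of that
pattern (my_suspend/my_resume are hypothetical driver callbacks):

	static int my_suspend(struct pci_dev *pdev, pm_message_t state)
	{
		pci_save_state(pdev);	/* now also saves MSI/MSI-X state */
		pci_disable_device(pdev);
		pci_set_power_state(pdev, pci_choose_state(pdev, state));
		return 0;
	}

	static int my_resume(struct pci_dev *pdev)
	{
		pci_set_power_state(pdev, PCI_D0);
		pci_restore_state(pdev);	/* restores and re-enables MSI/MSI-X */
		return pci_enable_device(pdev);
	}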

+ 11 - 0
drivers/pci/pci.h

@@ -55,6 +55,17 @@ void pci_no_msi(void);
 static inline void disable_msi_mode(struct pci_dev *dev, int pos, int type) { }
 static inline void pci_no_msi(void) { }
 #endif
+#if defined(CONFIG_PCI_MSI) && defined(CONFIG_PM)
+int pci_save_msi_state(struct pci_dev *dev);
+int pci_save_msix_state(struct pci_dev *dev);
+void pci_restore_msi_state(struct pci_dev *dev);
+void pci_restore_msix_state(struct pci_dev *dev);
+#else
+static inline int pci_save_msi_state(struct pci_dev *dev) { return 0; }
+static inline int pci_save_msix_state(struct pci_dev *dev) { return 0; }
+static inline void pci_restore_msi_state(struct pci_dev *dev) {}
+static inline void pci_restore_msix_state(struct pci_dev *dev) {}
+#endif
 
 
 extern int pcie_mch_quirk;
 extern struct device_attribute pci_dev_attrs[];

+ 3 - 1
drivers/pci/quirks.c

@@ -592,7 +592,7 @@ static void __init quirk_amd_8131_ioapic(struct pci_dev *dev)
                 pci_write_config_byte( dev, AMD8131_MISC, tmp);
         }
 } 
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_APIC,         quirk_amd_8131_ioapic ); 
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic);
 
 
 static void __init quirk_svw_msi(struct pci_dev *dev)
 {
@@ -921,6 +921,7 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
 		if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB) {
 			switch (dev->subsystem_device) {
 			case 0x1882: /* M6V notebook */
+			case 0x1977: /* A6VA notebook */
 				asus_hides_smbus = 1;
 			}
 		}
@@ -999,6 +1000,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801BA_0,	asu
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801CA_12,	asus_hides_smbus_lpc );
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801DB_12,	asus_hides_smbus_lpc );
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801EB_0,	asus_hides_smbus_lpc );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH6_1,	asus_hides_smbus_lpc );
 
 
 static void __init asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
 {

+ 32 - 1
include/linux/pci.h

@@ -97,7 +97,13 @@ enum pci_channel_state {
 
 
 typedef unsigned short __bitwise pci_bus_flags_t;
 enum pci_bus_flags {
-	PCI_BUS_FLAGS_NO_MSI = (pci_bus_flags_t) 1,
+	PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1,
+};
+
+struct pci_cap_saved_state {
+	struct hlist_node next;
+	char cap_nr;
+	u32 data[0];
 };
 
 /*
@@ -159,6 +165,7 @@ struct pci_dev {
 	unsigned int	block_ucfg_access:1;	/* userspace config space access is blocked */
 
 	u32		saved_config_space[16]; /* config space saved at suspend time */
+	struct hlist_head saved_cap_space;
 	struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */
 	int rom_attr_enabled;		/* has display of the rom attribute been enabled? */
 	struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
@@ -169,6 +176,30 @@ struct pci_dev {
 #define	to_pci_dev(n) container_of(n, struct pci_dev, dev)
 #define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
 
+static inline struct pci_cap_saved_state *pci_find_saved_cap(
+	struct pci_dev *pci_dev, char cap)
+{
+	struct pci_cap_saved_state *tmp;
+	struct hlist_node *pos;
+
+	hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
+		if (tmp->cap_nr == cap)
+			return tmp;
+	}
+	return NULL;
+}
+
+static inline void pci_add_saved_cap(struct pci_dev *pci_dev,
+	struct pci_cap_saved_state *new_cap)
+{
+	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
+}
+
+static inline void pci_remove_saved_cap(struct pci_cap_saved_state *cap)
+{
+	hlist_del(&cap->next);
+}
+
 /*
  *  For PCI devices, the region numbers are assigned this way:
  *

+ 2 - 1
include/linux/pci_ids.h

@@ -497,7 +497,8 @@
 #define PCI_DEVICE_ID_AMD_8111_SMBUS	0x746b
 #define PCI_DEVICE_ID_AMD_8111_AUDIO	0x746d
 #define PCI_DEVICE_ID_AMD_8151_0	0x7454
-#define PCI_DEVICE_ID_AMD_8131_APIC     0x7450
+#define PCI_DEVICE_ID_AMD_8131_BRIDGE	0x7450
+#define PCI_DEVICE_ID_AMD_8131_APIC	0x7451
 #define PCI_DEVICE_ID_AMD_CS5536_ISA    0x2090
 #define PCI_DEVICE_ID_AMD_CS5536_FLASH  0x2091
 #define PCI_DEVICE_ID_AMD_CS5536_AUDIO  0x2093

+ 0 - 7
include/linux/pm_legacy.h

@@ -15,11 +15,6 @@ extern int pm_active;
 struct pm_dev __deprecated *
 pm_register(pm_dev_t type, unsigned long id, pm_callback callback);
 
-/*
- * Unregister a device with power management
- */
-void __deprecated pm_unregister(struct pm_dev *dev);
-
 /*
  * Unregister all devices with matching callback
  */
@@ -41,8 +36,6 @@ static inline struct pm_dev *pm_register(pm_dev_t type,
 	return NULL;
 }
 
-static inline void pm_unregister(struct pm_dev *dev) {}
-
 static inline void pm_unregister_all(pm_callback callback) {}
 
 static inline int pm_send_all(pm_request_t rqst, void *data)

+ 0 - 20
kernel/power/pm.c

@@ -75,25 +75,6 @@ struct pm_dev *pm_register(pm_dev_t type,
 	return dev;
 }
 
-/**
- *	pm_unregister -  unregister a device with power management
- *	@dev: device to unregister
- *
- *	Remove a device from the power management notification lists. The
- *	dev passed must be a handle previously returned by pm_register.
- */
- 
-void pm_unregister(struct pm_dev *dev)
-{
-	if (dev) {
-		mutex_lock(&pm_devs_lock);
-		list_del(&dev->entry);
-		mutex_unlock(&pm_devs_lock);
-
-		kfree(dev);
-	}
-}
-
 static void __pm_unregister(struct pm_dev *dev)
 {
 	if (dev) {
@@ -258,7 +239,6 @@ int pm_send_all(pm_request_t rqst, void *data)
 }
 
 EXPORT_SYMBOL(pm_register);
-EXPORT_SYMBOL(pm_unregister);
 EXPORT_SYMBOL(pm_unregister_all);
 EXPORT_SYMBOL(pm_send_all);
 EXPORT_SYMBOL(pm_active);