Jeff Garzik 19 years ago
parent commit ccd7bc2f67
54 changed files with 677 additions and 496 deletions
  1. Documentation/kernel-parameters.txt  +251 -231
  2. arch/arm/mach-integrator/impd1.c  +8 -7
  3. arch/arm/mach-pxa/generic.c  +20 -0
  4. arch/arm/mach-s3c2410/mach-bast.c  +3 -3
  5. arch/i386/kernel/cpu/cpufreq/powernow-k8.c  +19 -11
  6. arch/ppc64/kernel/iSeries_htab.c  +1 -1
  7. arch/ppc64/kernel/time.c  +1 -1
  8. arch/ppc64/mm/init.c  +1 -2
  9. arch/um/include/sysdep-i386/thread.h  +1 -1
  10. arch/um/include/sysdep-x86_64/thread.h  +1 -1
  11. drivers/char/drm/drm_vm.c  +2 -1
  12. drivers/char/drm/mga_drv.h  +1 -1
  13. drivers/char/drm/mga_state.c  +1 -1
  14. drivers/infiniband/hw/mthca/mthca_eq.c  +11 -10
  15. drivers/media/video/Kconfig  +0 -1
  16. drivers/message/fusion/mptsas.c  +8 -4
  17. drivers/pci/quirks.c  +88 -13
  18. drivers/pci/setup-bus.c  +1 -1
  19. drivers/scsi/NCR5380.c  +1 -1
  20. drivers/scsi/aacraid/aacraid.h  +1 -1
  21. drivers/scsi/qla2xxx/qla_os.c  +2 -1
  22. drivers/scsi/scsi_devinfo.c  +1 -0
  23. drivers/scsi/scsi_lib.c  +4 -3
  24. drivers/scsi/scsi_transport_fc.c  +10 -3
  25. drivers/video/fbsysfs.c  +8 -0
  26. fs/aio.c  +6 -0
  27. fs/inotify.c  +1 -0
  28. include/asm-alpha/atomic.h  +10 -4
  29. include/asm-alpha/barrier.h  +34 -0
  30. include/asm-alpha/system.h  +1 -30
  31. include/asm-arm/arch-s3c2410/regs-clock.h  +17 -4
  32. include/linux/hugetlb.h  +3 -13
  33. include/linux/idr.h  +1 -0
  34. include/net/ax25.h  +1 -1
  35. include/net/llc_pdu.h  +3 -1
  36. kernel/exit.c  +1 -8
  37. kernel/posix-cpu-timers.c  +67 -52
  38. kernel/posix-timers.c  +1 -1
  39. kernel/signal.c  +1 -13
  40. lib/idr.c  +13 -0
  41. mm/hugetlb.c  +22 -0
  42. mm/memory.c  +2 -12
  43. net/802/tr.c  +3 -2
  44. net/core/neighbour.c  +16 -32
  45. net/core/skbuff.c  +6 -0
  46. net/dccp/ipv4.c  +2 -0
  47. net/dccp/output.c  +5 -5
  48. net/dccp/proto.c  +0 -2
  49. net/ipv4/ip_output.c  +3 -0
  50. net/ipv4/tcp_output.c  +1 -11
  51. net/ipv6/ip6_flowlabel.c  +1 -1
  52. security/selinux/selinuxfs.c  +3 -1
  53. security/selinux/ss/policydb.c  +4 -2
  54. sound/core/init.c  +4 -2

+ 251 - 231
Documentation/kernel-parameters.txt

File diff suppressed because it is too large


+ 8 - 7
arch/arm/mach-integrator/impd1.c

@@ -67,7 +67,7 @@ static void impd1_setvco(struct clk *clk, struct icst525_vco vco)
 	}
 	writel(0, impd1->base + IMPD1_LOCK);
 
-#if DEBUG
+#ifdef DEBUG
 	vco.v = val & 0x1ff;
 	vco.r = (val >> 9) & 0x7f;
 	vco.s = (val >> 16) & 7;
@@ -427,17 +427,18 @@ static int impd1_probe(struct lm_device *dev)
 	return ret;
 }
 
+static int impd1_remove_one(struct device *dev, void *data)
+{
+	device_unregister(dev);
+	return 0;
+}
+
 static void impd1_remove(struct lm_device *dev)
 {
 	struct impd1_module *impd1 = lm_get_drvdata(dev);
-	struct list_head *l, *n;
 	int i;
 
-	list_for_each_safe(l, n, &dev->dev.children) {
-		struct device *d = list_to_dev(l);
-
-		device_unregister(d);
-	}
+	device_for_each_child(&dev->dev, NULL, impd1_remove_one);
 
 	for (i = 0; i < ARRAY_SIZE(impd1->vcos); i++)
 		clk_unregister(&impd1->vcos[i]);

+ 20 - 0
arch/arm/mach-pxa/generic.c

@@ -250,6 +250,25 @@ void __init pxa_set_i2c_info(struct i2c_pxa_platform_data *info)
 	i2c_device.dev.platform_data = info;
 }
 
+static struct resource i2s_resources[] = {
+	{
+		.start	= 0x40400000,
+		.end	= 0x40400083,
+		.flags	= IORESOURCE_MEM,
+	}, {
+		.start	= IRQ_I2S,
+		.end	= IRQ_I2S,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device i2s_device = {
+	.name		= "pxa2xx-i2s",
+	.id		= -1,
+	.resource	= i2s_resources,
+	.num_resources	= ARRAY_SIZE(i2s_resources),
+};
+
 static struct platform_device *devices[] __initdata = {
 	&pxamci_device,
 	&udc_device,
@@ -258,6 +277,7 @@ static struct platform_device *devices[] __initdata = {
 	&btuart_device,
 	&stuart_device,
 	&i2c_device,
+	&i2s_device,
 };
 
 static int __init pxa_init(void)

+ 3 - 3
arch/arm/mach-s3c2410/mach-bast.c

@@ -307,9 +307,9 @@ static void bast_nand_select(struct s3c2410_nand_set *set, int slot)
 }
 
 static struct s3c2410_platform_nand bast_nand_info = {
-	.tacls		= 40,
-	.twrph0		= 80,
-	.twrph1		= 80,
+	.tacls		= 30,
+	.twrph0		= 60,
+	.twrph1		= 60,
 	.nr_sets	= ARRAY_SIZE(bast_nand_sets),
 	.sets		= bast_nand_sets,
 	.select_chip	= bast_nand_select,

+ 19 - 11
arch/i386/kernel/cpu/cpufreq/powernow-k8.c

@@ -44,7 +44,7 @@
 
 #define PFX "powernow-k8: "
 #define BFX PFX "BIOS error: "
-#define VERSION "version 1.50.3"
+#define VERSION "version 1.50.4"
 #include "powernow-k8.h"
 
 /* serialize freq changes  */
@@ -111,8 +111,8 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
 	u32 i = 0;
 
 	do {
-		if (i++ > 0x1000000) {
-			printk(KERN_ERR PFX "detected change pending stuck\n");
+		if (i++ > 10000) {
+			dprintk("detected change pending stuck\n");
 			return 1;
 		}
 		rdmsr(MSR_FIDVID_STATUS, lo, hi);
@@ -159,6 +159,7 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid)
 {
 	u32 lo;
 	u32 savevid = data->currvid;
+	u32 i = 0;
 
 	if ((fid & INVALID_FID_MASK) || (data->currvid & INVALID_VID_MASK)) {
 		printk(KERN_ERR PFX "internal error - overflow on fid write\n");
@@ -170,10 +171,13 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid)
 	dprintk("writing fid 0x%x, lo 0x%x, hi 0x%x\n",
 		fid, lo, data->plllock * PLL_LOCK_CONVERSION);
 
-	wrmsr(MSR_FIDVID_CTL, lo, data->plllock * PLL_LOCK_CONVERSION);
-
-	if (query_current_values_with_pending_wait(data))
-		return 1;
+	do {
+		wrmsr(MSR_FIDVID_CTL, lo, data->plllock * PLL_LOCK_CONVERSION);
+		if (i++ > 100) {
+			printk(KERN_ERR PFX "internal error - pending bit very stuck - no further pstate changes possible\n");
+			return 1;
+		}			
+	} while (query_current_values_with_pending_wait(data));
 
 	count_off_irt(data);
 
@@ -197,6 +201,7 @@ static int write_new_vid(struct powernow_k8_data *data, u32 vid)
 {
 	u32 lo;
 	u32 savefid = data->currfid;
+	int i = 0;
 
 	if ((data->currfid & INVALID_FID_MASK) || (vid & INVALID_VID_MASK)) {
 		printk(KERN_ERR PFX "internal error - overflow on vid write\n");
@@ -208,10 +213,13 @@ static int write_new_vid(struct powernow_k8_data *data, u32 vid)
 	dprintk("writing vid 0x%x, lo 0x%x, hi 0x%x\n",
 		vid, lo, STOP_GRANT_5NS);
 
-	wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS);
-
-	if (query_current_values_with_pending_wait(data))
-		return 1;
+	do {
+		wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS);
+                if (i++ > 100) {
+                        printk(KERN_ERR PFX "internal error - pending bit very stuck - no further pstate changes possible\n");
+                        return 1;
+                }
+	} while (query_current_values_with_pending_wait(data));
 
 	if (savefid != data->currfid) {
 		printk(KERN_ERR PFX "fid changed on vid trans, old 0x%x new 0x%x\n",

+ 1 - 1
arch/ppc64/kernel/iSeries_htab.c

@@ -66,7 +66,7 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
 	}
 
 	if (slot < 0) {		/* MSB set means secondary group */
-		vflags |= HPTE_V_VALID;
+		vflags |= HPTE_V_SECONDARY;
 		secondary = 1;
 		slot &= 0x7fffffffffffffff;
 	}

+ 1 - 1
arch/ppc64/kernel/time.c

@@ -870,7 +870,7 @@ void div128_by_32( unsigned long dividend_high, unsigned long dividend_low,
 	rb = ((ra + b) - (x * divisor)) << 32;
 
 	y = (rb + c)/divisor;
-	rc = ((rb + b) - (y * divisor)) << 32;
+	rc = ((rb + c) - (y * divisor)) << 32;
 
 	z = (rc + d)/divisor;
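For context on the one-character fix above: div128_by_32() is schoolbook long division of a 128-bit dividend by a 32-bit divisor, one 32-bit limb at a time, and each shifted remainder has to come from the same partial sum that produced the corresponding quotient limb (rb + c, not rb + b). A minimal userspace sketch of the same limb-by-limb scheme, cross-checked against the compiler's 128-bit division (illustrative only; the test values and the __int128 check are not from the kernel):

    #include <stdint.h>
    #include <stdio.h>

    /* Long division of (hi:lo) by a 32-bit divisor, 32 bits at a time.
     * w, x, y, z are the quotient limbs; ra, rb, rc are the remainders,
     * shifted up and carried into the next step. */
    static void div128_by_32_demo(uint64_t hi, uint64_t lo, uint32_t divisor,
                                  uint64_t *res_hi, uint64_t *res_lo)
    {
        uint64_t a = hi >> 32, b = hi & 0xffffffffu;
        uint64_t c = lo >> 32, d = lo & 0xffffffffu;
        uint64_t w, x, y, z, ra, rb, rc;

        w  = a / divisor;
        ra = (a % divisor) << 32;

        x  = (ra + b) / divisor;
        rb = ((ra + b) % divisor) << 32;

        y  = (rb + c) / divisor;
        rc = ((rb + c) % divisor) << 32;  /* must reuse rb + c, not rb + b */

        z  = (rc + d) / divisor;

        *res_hi = (w << 32) + x;
        *res_lo = (y << 32) + z;
    }

    int main(void)
    {
        uint64_t hi = 0x0123456789abcdefull, lo = 0xfedcba9876543210ull;
        uint64_t qh, ql;

        div128_by_32_demo(hi, lo, 1000000007u, &qh, &ql);

        /* Cross-check against GCC/clang 128-bit division. */
        unsigned __int128 n = ((unsigned __int128)hi << 64) | lo;
        unsigned __int128 q = n / 1000000007u;
        printf("%s\n", (qh == (uint64_t)(q >> 64) && ql == (uint64_t)q)
               ? "match" : "MISMATCH");
        return 0;
    }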
 

+ 1 - 2
arch/ppc64/mm/init.c

@@ -799,8 +799,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
 	if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
 		local = 1;
 
-	__hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,
-		    0x300, local);
+	__hash_page(ea, 0, vsid, ptep, 0x300, local);
 	local_irq_restore(flags);
 }
 

+ 1 - 1
arch/um/include/sysdep-i386/thread.h

@@ -4,7 +4,7 @@
 #include <kern_constants.h>
 
 #define TASK_DEBUGREGS(task) ((unsigned long *) &(((char *) (task))[HOST_TASK_DEBUGREGS]))
-#ifdef CONFIG_MODE_TT
+#ifdef UML_CONFIG_MODE_TT
 #define TASK_EXTERN_PID(task) *((int *) &(((char *) (task))[HOST_TASK_EXTERN_PID]))
 #endif
 

+ 1 - 1
arch/um/include/sysdep-x86_64/thread.h

@@ -3,7 +3,7 @@
 
 #include <kern_constants.h>
 
-#ifdef CONFIG_MODE_TT
+#ifdef UML_CONFIG_MODE_TT
 #define TASK_EXTERN_PID(task) *((int *) &(((char *) (task))[HOST_TASK_EXTERN_PID]))
 #endif
 

+ 2 - 1
drivers/char/drm/drm_vm.c

@@ -148,7 +148,8 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
 
 	offset	 = address - vma->vm_start;
 	i = (unsigned long)map->handle + offset;
-	page = vmalloc_to_page((void *)i);
+	page = (map->type == _DRM_CONSISTENT) ?
+		virt_to_page((void *)i) : vmalloc_to_page((void *)i);
 	if (!page)
 		return NOPAGE_OOM;
 	get_page(page);

+ 1 - 1
drivers/char/drm/mga_drv.h

@@ -227,7 +227,7 @@ static inline u32 _MGA_READ(u32 *addr)
 #define MGA_EMIT_STATE( dev_priv, dirty )				\
 do {									\
 	if ( (dirty) & ~MGA_UPLOAD_CLIPRECTS ) {			\
-		if ( dev_priv->chipset == MGA_CARD_TYPE_G400 ) {	\
+		if ( dev_priv->chipset >= MGA_CARD_TYPE_G400 ) {	\
 			mga_g400_emit_state( dev_priv );		\
 		} else {						\
 			mga_g200_emit_state( dev_priv );		\

+ 1 - 1
drivers/char/drm/mga_state.c

@@ -53,7 +53,7 @@ static void mga_emit_clip_rect( drm_mga_private_t *dev_priv,
 
 	/* Force reset of DWGCTL on G400 (eliminates clip disable bit).
 	 */
-	if (dev_priv->chipset == MGA_CARD_TYPE_G400) {
+	if (dev_priv->chipset >= MGA_CARD_TYPE_G400) {
 		DMA_BLOCK(MGA_DWGCTL, ctx->dwgctl,
 			  MGA_LEN + MGA_EXEC, 0x80000000,
 			  MGA_DWGCTL, ctx->dwgctl,

+ 11 - 10
drivers/infiniband/hw/mthca/mthca_eq.c

@@ -396,20 +396,21 @@ static irqreturn_t mthca_tavor_interrupt(int irq, void *dev_ptr, struct pt_regs
 		writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);
 
 	ecr = readl(dev->eq_regs.tavor.ecr_base + 4);
-	if (ecr) {
-		writel(ecr, dev->eq_regs.tavor.ecr_base +
-		       MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4);
+	if (!ecr)
+		return IRQ_NONE;
 
-		for (i = 0; i < MTHCA_NUM_EQ; ++i)
-			if (ecr & dev->eq_table.eq[i].eqn_mask &&
-			    mthca_eq_int(dev, &dev->eq_table.eq[i])) {
+	writel(ecr, dev->eq_regs.tavor.ecr_base +
+	       MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4);
+
+	for (i = 0; i < MTHCA_NUM_EQ; ++i)
+		if (ecr & dev->eq_table.eq[i].eqn_mask) {
+			if (mthca_eq_int(dev, &dev->eq_table.eq[i]))
 				tavor_set_eq_ci(dev, &dev->eq_table.eq[i],
 						dev->eq_table.eq[i].cons_index);
-				tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);
-			}
-	}
+			tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);
+		}
 
-	return IRQ_RETVAL(ecr);
+	return IRQ_HANDLED;
 }
 
 static irqreturn_t mthca_tavor_msi_x_interrupt(int irq, void *eq_ptr,

+ 0 - 1
drivers/media/video/Kconfig

@@ -262,7 +262,6 @@ config VIDEO_SAA7134_DVB
 	depends on VIDEO_SAA7134 && DVB_CORE
 	select VIDEO_BUF_DVB
 	select DVB_MT352
-	select DVB_CX22702
 	select DVB_TDA1004X
 	---help---
 	  This adds support for DVB cards based on the

+ 8 - 4
drivers/message/fusion/mptsas.c

@@ -257,8 +257,8 @@ static void mptsas_print_device_pg0(SasDevicePage0_t *pg0)
 	printk("SAS Address=0x%llX\n", le64_to_cpu(sas_address));
 	printk("Target ID=0x%X\n", pg0->TargetID);
 	printk("Bus=0x%X\n", pg0->Bus);
-	printk("PhyNum=0x%X\n", pg0->PhyNum);
-	printk("AccessStatus=0x%X\n", le16_to_cpu(pg0->AccessStatus));
+	printk("Parent Phy Num=0x%X\n", pg0->PhyNum);
+	printk("Access Status=0x%X\n", le16_to_cpu(pg0->AccessStatus));
 	printk("Device Info=0x%X\n", le32_to_cpu(pg0->DeviceInfo));
 	printk("Flags=0x%X\n", le16_to_cpu(pg0->Flags));
 	printk("Physical Port=0x%X\n", pg0->PhysicalPort);
@@ -270,7 +270,7 @@ static void mptsas_print_expander_pg1(SasExpanderPage1_t *pg1)
 	printk("---- SAS EXPANDER PAGE 1 ------------\n");
 
 	printk("Physical Port=0x%X\n", pg1->PhysicalPort);
-	printk("PHY Identifier=0x%X\n", pg1->Phy);
+	printk("PHY Identifier=0x%X\n", pg1->PhyIdentifier);
 	printk("Negotiated Link Rate=0x%X\n", pg1->NegotiatedLinkRate);
 	printk("Programmed Link Rate=0x%X\n", pg1->ProgrammedLinkRate);
 	printk("Hardware Link Rate=0x%X\n", pg1->HwLinkRate);
@@ -604,7 +604,7 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
 	mptsas_print_expander_pg1(buffer);
 
 	/* save config data */
-	phy_info->phy_id = buffer->Phy;
+	phy_info->phy_id = buffer->PhyIdentifier;
 	phy_info->port_id = buffer->PhysicalPort;
 	phy_info->negotiated_link_rate = buffer->NegotiatedLinkRate;
 	phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
@@ -825,6 +825,8 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc, int *index)
 		mptsas_sas_device_pg0(ioc, &port_info->phy_info[i].identify,
 			(MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE <<
 			 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), handle);
+		port_info->phy_info[i].identify.phy_id =
+		    port_info->phy_info[i].phy_id;
 		handle = port_info->phy_info[i].identify.handle;
 
 		if (port_info->phy_info[i].attached.handle) {
@@ -881,6 +883,8 @@ mptsas_probe_expander_phys(MPT_ADAPTER *ioc, u32 *handle, int *index)
 				(MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
 				 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
 				port_info->phy_info[i].identify.handle);
+			port_info->phy_info[i].identify.phy_id =
+			    port_info->phy_info[i].phy_id;
 		}
 
 		if (port_info->phy_info[i].attached.handle) {

+ 88 - 13
drivers/pci/quirks.c

@@ -241,7 +241,8 @@ static void __devinit quirk_s3_64M(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3,	PCI_DEVICE_ID_S3_868,		quirk_s3_64M );
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3,	PCI_DEVICE_ID_S3_968,		quirk_s3_64M );
 
-static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, unsigned size, int nr)
+static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region,
+	unsigned size, int nr, const char *name)
 {
 	region &= ~(size-1);
 	if (region) {
@@ -259,6 +260,7 @@ static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, unsi
 		pcibios_bus_to_resource(dev, res, &bus_region);
 
 		pci_claim_resource(dev, nr);
+		printk("PCI quirk: region %04x-%04x claimed by %s\n", region, region + size - 1, name);
 	}
 }	
 
@@ -291,25 +293,98 @@ static void __devinit quirk_ali7101_acpi(struct pci_dev *dev)
 	u16 region;
 
 	pci_read_config_word(dev, 0xE0, &region);
-	quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES);
+	quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES, "ali7101 ACPI");
 	pci_read_config_word(dev, 0xE2, &region);
-	quirk_io_region(dev, region, 32, PCI_BRIDGE_RESOURCES+1);
+	quirk_io_region(dev, region, 32, PCI_BRIDGE_RESOURCES+1, "ali7101 SMB");
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL,	PCI_DEVICE_ID_AL_M7101,		quirk_ali7101_acpi );
 
+static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
+{
+	u32 devres;
+	u32 mask, size, base;
+
+	pci_read_config_dword(dev, port, &devres);
+	if ((devres & enable) != enable)
+		return;
+	mask = (devres >> 16) & 15;
+	base = devres & 0xffff;
+	size = 16;
+	for (;;) {
+		unsigned bit = size >> 1;
+		if ((bit & mask) == bit)
+			break;
+		size = bit;
+	}
+	/*
+	 * For now we only print it out. Eventually we'll want to
+	 * reserve it (at least if it's in the 0x1000+ range), but
+	 * let's get enough confirmation reports first. 
+	 */
+	base &= -size;
+	printk("%s PIO at %04x-%04x\n", name, base, base + size - 1);
+}
+
+static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
+{
+	u32 devres;
+	u32 mask, size, base;
+
+	pci_read_config_dword(dev, port, &devres);
+	if ((devres & enable) != enable)
+		return;
+	base = devres & 0xffff0000;
+	mask = (devres & 0x3f) << 16;
+	size = 128 << 16;
+	for (;;) {
+		unsigned bit = size >> 1;
+		if ((bit & mask) == bit)
+			break;
+		size = bit;
+	}
+	/*
+	 * For now we only print it out. Eventually we'll want to
+	 * reserve it, but let's get enough confirmation reports first. 
+	 */
+	base &= -size;
+	printk("%s MMIO at %04x-%04x\n", name, base, base + size - 1);
+}
+
 /*
  * PIIX4 ACPI: Two IO regions pointed to by longwords at
  *	0x40 (64 bytes of ACPI registers)
  *	0x90 (32 bytes of SMB registers)
+ * and a few strange programmable PIIX4 device resources.
  */
 static void __devinit quirk_piix4_acpi(struct pci_dev *dev)
 {
-	u32 region;
+	u32 region, res_a;
 
 	pci_read_config_dword(dev, 0x40, &region);
-	quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES);
+	quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES, "PIIX4 ACPI");
 	pci_read_config_dword(dev, 0x90, &region);
-	quirk_io_region(dev, region, 32, PCI_BRIDGE_RESOURCES+1);
+	quirk_io_region(dev, region, 32, PCI_BRIDGE_RESOURCES+1, "PIIX4 SMB");
+
+	/* Device resource A has enables for some of the other ones */
+	pci_read_config_dword(dev, 0x5c, &res_a);
+
+	piix4_io_quirk(dev, "PIIX4 devres B", 0x60, 3 << 21);
+	piix4_io_quirk(dev, "PIIX4 devres C", 0x64, 3 << 21);
+
+	/* Device resource D is just bitfields for static resources */
+
+	/* Device 12 enabled? */
+	if (res_a & (1 << 29)) {
+		piix4_io_quirk(dev, "PIIX4 devres E", 0x68, 1 << 20);
+		piix4_mem_quirk(dev, "PIIX4 devres F", 0x6c, 1 << 7);
+	}
+	/* Device 13 enabled? */
+	if (res_a & (1 << 30)) {
+		piix4_io_quirk(dev, "PIIX4 devres G", 0x70, 1 << 20);
+		piix4_mem_quirk(dev, "PIIX4 devres H", 0x74, 1 << 7);
+	}
+	piix4_io_quirk(dev, "PIIX4 devres I", 0x78, 1 << 20);
+	piix4_io_quirk(dev, "PIIX4 devres J", 0x7c, 1 << 20);
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82371AB_3,	quirk_piix4_acpi );
 
@@ -323,10 +398,10 @@ static void __devinit quirk_ich4_lpc_acpi(struct pci_dev *dev)
 	u32 region;
 
 	pci_read_config_dword(dev, 0x40, &region);
-	quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES);
+	quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH4 ACPI/GPIO/TCO");
 
 	pci_read_config_dword(dev, 0x58, &region);
-	quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1);
+	quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH4 GPIO");
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801AA_0,		quirk_ich4_lpc_acpi );
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801AB_0,		quirk_ich4_lpc_acpi );
@@ -352,7 +427,7 @@ static void __devinit quirk_vt82c586_acpi(struct pci_dev *dev)
 	if (rev & 0x10) {
 		pci_read_config_dword(dev, 0x48, &region);
 		region &= PCI_BASE_ADDRESS_IO_MASK;
-		quirk_io_region(dev, region, 256, PCI_BRIDGE_RESOURCES);
+		quirk_io_region(dev, region, 256, PCI_BRIDGE_RESOURCES, "vt82c586 ACPI");
 	}
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C586_3,	quirk_vt82c586_acpi );
@@ -372,11 +447,11 @@ static void __devinit quirk_vt82c686_acpi(struct pci_dev *dev)
 
 	pci_read_config_word(dev, 0x70, &hm);
 	hm &= PCI_BASE_ADDRESS_IO_MASK;
-	quirk_io_region(dev, hm, 128, PCI_BRIDGE_RESOURCES + 1);
+	quirk_io_region(dev, hm, 128, PCI_BRIDGE_RESOURCES + 1, "vt82c868 HW-mon");
 
 	pci_read_config_dword(dev, 0x90, &smb);
 	smb &= PCI_BASE_ADDRESS_IO_MASK;
-	quirk_io_region(dev, smb, 16, PCI_BRIDGE_RESOURCES + 2);
+	quirk_io_region(dev, smb, 16, PCI_BRIDGE_RESOURCES + 2, "vt82c868 SMB");
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C686_4,	quirk_vt82c686_acpi );
 
@@ -391,11 +466,11 @@ static void __devinit quirk_vt8235_acpi(struct pci_dev *dev)
 
 	pci_read_config_word(dev, 0x88, &pm);
 	pm &= PCI_BASE_ADDRESS_IO_MASK;
-	quirk_io_region(dev, pm, 128, PCI_BRIDGE_RESOURCES);
+	quirk_io_region(dev, pm, 128, PCI_BRIDGE_RESOURCES, "vt8235 PM");
 
 	pci_read_config_word(dev, 0xd0, &smb);
 	smb &= PCI_BASE_ADDRESS_IO_MASK;
-	quirk_io_region(dev, smb, 16, PCI_BRIDGE_RESOURCES + 1);
+	quirk_io_region(dev, smb, 16, PCI_BRIDGE_RESOURCES + 1, "vt8235 SMB");
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8235,	quirk_vt8235_acpi);
 

+ 1 - 1
drivers/pci/setup-bus.c

@@ -40,7 +40,7 @@
  * FIXME: IO should be max 256 bytes.  However, since we may
  * have a P2P bridge below a cardbus bridge, we need 4K.
  */
-#define CARDBUS_IO_SIZE		(4*1024)
+#define CARDBUS_IO_SIZE		(256)
 #define CARDBUS_MEM_SIZE	(32*1024*1024)
 
 static void __devinit

+ 1 - 1
drivers/scsi/NCR5380.c

@@ -91,7 +91,7 @@
 #ifndef NDEBUG
 #define NDEBUG 0
 #endif
-#ifndef NDEBUG
+#ifndef NDEBUG_ABORT
 #define NDEBUG_ABORT 0
 #endif
 

+ 1 - 1
drivers/scsi/aacraid/aacraid.h

@@ -19,7 +19,7 @@
 #define AAC_MAX_LUN		(8)
 
 #define AAC_MAX_HOSTPHYSMEMPAGES (0xfffff)
-#define AAC_MAX_32BIT_SGBCOUNT	((unsigned short)512)
+#define AAC_MAX_32BIT_SGBCOUNT	((unsigned short)256)
 
 /*
  * These macros convert from physical channels to virtual channels

+ 2 - 1
drivers/scsi/qla2xxx/qla_os.c

@@ -1325,6 +1325,8 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
 	ha->brd_info = brd_info;
 	sprintf(ha->host_str, "%s_%ld", ha->brd_info->drv_name, ha->host_no);
 
+	ha->dpc_pid = -1;
+
 	/* Configure PCI I/O space */
 	ret = qla2x00_iospace_config(ha);
 	if (ret)
@@ -1448,7 +1450,6 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
 	 */
 	spin_lock_init(&ha->mbx_reg_lock);
 
-	ha->dpc_pid = -1;
 	init_completion(&ha->dpc_inited);
 	init_completion(&ha->dpc_exited);
 

+ 1 - 0
drivers/scsi/scsi_devinfo.c

@@ -185,6 +185,7 @@ static struct {
 	{"PIONEER", "CD-ROM DRM-600", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
 	{"PIONEER", "CD-ROM DRM-602X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
 	{"PIONEER", "CD-ROM DRM-604X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+	{"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
 	{"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN},
 	{"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN},
 	{"SEAGATE", "ST34555N", "0930", BLIST_NOTQ},	/* Chokes on tagged INQUIRY */

+ 4 - 3
drivers/scsi/scsi_lib.c

@@ -97,7 +97,6 @@ int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
 }
 
 static void scsi_run_queue(struct request_queue *q);
-static void scsi_release_buffers(struct scsi_cmnd *cmd);
 
 /*
  * Function:	scsi_unprep_request()
@@ -1040,8 +1039,10 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
 	 * if sg table allocation fails, requeue request later.
 	 */
 	sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
-	if (unlikely(!sgpnt))
+	if (unlikely(!sgpnt)) {
+		scsi_unprep_request(req);
 		return BLKPREP_DEFER;
+	}
 
 	cmd->request_buffer = (char *) sgpnt;
 	cmd->request_bufflen = req->nr_sectors << 9;
@@ -1245,8 +1246,8 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
 		 */
 		ret = scsi_init_io(cmd);
 		switch(ret) {
+			/* For BLKPREP_KILL/DEFER the cmd was released */
 		case BLKPREP_KILL:
-			/* BLKPREP_KILL return also releases the command */
 			goto kill;
 		case BLKPREP_DEFER:
 			goto defer;

+ 10 - 3
drivers/scsi/scsi_transport_fc.c

@@ -819,12 +819,15 @@ show_fc_private_host_tgtid_bind_type(struct class_device *cdev, char *buf)
 	return snprintf(buf, FC_BINDTYPE_MAX_NAMELEN, "%s\n", name);
 }
 
+#define get_list_head_entry(pos, head, member) 		\
+	pos = list_entry((head)->next, typeof(*pos), member)
+
 static ssize_t
 store_fc_private_host_tgtid_bind_type(struct class_device *cdev,
 	const char *buf, size_t count)
 {
 	struct Scsi_Host *shost = transport_class_to_shost(cdev);
-	struct fc_rport *rport, *next_rport;
+	struct fc_rport *rport;
  	enum fc_tgtid_binding_type val;
 	unsigned long flags;
 
@@ -834,9 +837,13 @@ store_fc_private_host_tgtid_bind_type(struct class_device *cdev,
 	/* if changing bind type, purge all unused consistent bindings */
 	if (val != fc_host_tgtid_bind_type(shost)) {
 		spin_lock_irqsave(shost->host_lock, flags);
-		list_for_each_entry_safe(rport, next_rport,
-				&fc_host_rport_bindings(shost), peers)
+		while (!list_empty(&fc_host_rport_bindings(shost))) {
+			get_list_head_entry(rport,
+				&fc_host_rport_bindings(shost), peers);
+			spin_unlock_irqrestore(shost->host_lock, flags);
 			fc_rport_terminate(rport);
+			spin_lock_irqsave(shost->host_lock, flags);
+		}
 		spin_unlock_irqrestore(shost->host_lock, flags);
 	}
 

+ 8 - 0
drivers/video/fbsysfs.c

@@ -242,6 +242,13 @@ static ssize_t show_virtual(struct class_device *class_device, char *buf)
 			fb_info->var.yres_virtual);
 }
 
+static ssize_t show_stride(struct class_device *class_device, char *buf)
+{
+	struct fb_info *fb_info =
+		(struct fb_info *)class_get_devdata(class_device);
+	return snprintf(buf, PAGE_SIZE, "%d\n", fb_info->fix.line_length);
+}
+
 /* Format for cmap is "%02x%c%4x%4x%4x\n" */
 /* %02x entry %c transp %4x red %4x blue %4x green \n */
 /* 256 rows at 16 chars equals 4096, the normal page size */
@@ -432,6 +439,7 @@ static struct class_device_attribute class_device_attrs[] = {
 	__ATTR(pan, S_IRUGO|S_IWUSR, show_pan, store_pan),
 	__ATTR(virtual_size, S_IRUGO|S_IWUSR, show_virtual, store_virtual),
 	__ATTR(name, S_IRUGO, show_name, NULL),
+	__ATTR(stride, S_IRUGO, show_stride, NULL),
 };
 
 int fb_init_class_device(struct fb_info *fb_info)

+ 6 - 0
fs/aio.c

@@ -1397,6 +1397,9 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb)
 		if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf,
 			kiocb->ki_left)))
 			break;
+		ret = security_file_permission(file, MAY_READ);
+		if (unlikely(ret))
+			break;
 		ret = -EINVAL;
 		if (file->f_op->aio_read)
 			kiocb->ki_retry = aio_pread;
@@ -1409,6 +1412,9 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb)
 		if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf,
 			kiocb->ki_left)))
 			break;
+		ret = security_file_permission(file, MAY_WRITE);
+		if (unlikely(ret))
+			break;
 		ret = -EINVAL;
 		if (file->f_op->aio_write)
 			kiocb->ki_retry = aio_pwrite;

+ 1 - 0
fs/inotify.c

@@ -176,6 +176,7 @@ static inline void put_inotify_dev(struct inotify_device *dev)
 	if (atomic_dec_and_test(&dev->count)) {
 		atomic_dec(&dev->user->inotify_devs);
 		free_uid(dev->user);
+		idr_destroy(&dev->idr);
 		kfree(dev);
 	}
 }

+ 10 - 4
include/asm-alpha/atomic.h

@@ -1,6 +1,8 @@
 #ifndef _ALPHA_ATOMIC_H
 #define _ALPHA_ATOMIC_H
 
+#include <asm/barrier.h>
+
 /*
  * Atomic operations that C can't guarantee us.  Useful for
  * resource counting etc...
@@ -100,18 +102,19 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 static __inline__ long atomic_add_return(int i, atomic_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%1\n"
 	"	addl %0,%3,%2\n"
 	"	addl %0,%3,%0\n"
 	"	stl_c %0,%1\n"
 	"	beq %0,2f\n"
-	"	mb\n"
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }
 
@@ -120,54 +123,57 @@ static __inline__ long atomic_add_return(int i, atomic_t * v)
 static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%1\n"
 	"	addq %0,%3,%2\n"
 	"	addq %0,%3,%0\n"
 	"	stq_c %0,%1\n"
 	"	beq %0,2f\n"
-	"	mb\n"
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }
 
 static __inline__ long atomic_sub_return(int i, atomic_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%1\n"
 	"	subl %0,%3,%2\n"
 	"	subl %0,%3,%0\n"
 	"	stl_c %0,%1\n"
 	"	beq %0,2f\n"
-	"	mb\n"
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }
 
 static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%1\n"
 	"	subq %0,%3,%2\n"
 	"	subq %0,%3,%0\n"
 	"	stq_c %0,%1\n"
 	"	beq %0,2f\n"
-	"	mb\n"
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }
 

+ 34 - 0
include/asm-alpha/barrier.h

@@ -0,0 +1,34 @@
+#ifndef __BARRIER_H
+#define __BARRIER_H
+
+#define mb() \
+__asm__ __volatile__("mb": : :"memory")
+
+#define rmb() \
+__asm__ __volatile__("mb": : :"memory")
+
+#define wmb() \
+__asm__ __volatile__("wmb": : :"memory")
+
+#define read_barrier_depends() \
+__asm__ __volatile__("mb": : :"memory")
+
+#ifdef CONFIG_SMP
+#define smp_mb()	mb()
+#define smp_rmb()	rmb()
+#define smp_wmb()	wmb()
+#define smp_read_barrier_depends()	read_barrier_depends()
+#else
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#define smp_read_barrier_depends()	barrier()
+#endif
+
+#define set_mb(var, value) \
+do { var = value; mb(); } while (0)
+
+#define set_wmb(var, value) \
+do { var = value; wmb(); } while (0)
+
+#endif		/* __BARRIER_H */
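The smp_*() variants pair up across CPUs on SMP and fall back to a plain compiler barrier() on UP kernels, as the #ifdef above shows. A minimal illustrative sketch of the usual pairing (not from this commit; publish(), consume() and the two variables are hypothetical): the writer orders its data store before the flag store with smp_wmb(), and the reader orders the flag load before the data load with smp_rmb().

    static int shared_data;
    static int data_ready;

    /* producer side: make shared_data visible before the flag */
    static void publish(int value)
    {
        shared_data = value;
        smp_wmb();          /* order the data store before the flag store */
        data_ready = 1;
    }

    /* consumer side: only trust shared_data after seeing the flag */
    static int consume(int *value)
    {
        if (!data_ready)
            return 0;
        smp_rmb();          /* order the flag load before the data load */
        *value = shared_data;
        return 1;
    }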

+ 1 - 30
include/asm-alpha/system.h

@@ -4,6 +4,7 @@
 #include <linux/config.h>
 #include <asm/pal.h>
 #include <asm/page.h>
+#include <asm/barrier.h>
 
 /*
  * System defines.. Note that this is included both from .c and .S
@@ -139,36 +140,6 @@ extern void halt(void) __attribute__((noreturn));
 struct task_struct;
 extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*);
 
-#define mb() \
-__asm__ __volatile__("mb": : :"memory")
-
-#define rmb() \
-__asm__ __volatile__("mb": : :"memory")
-
-#define wmb() \
-__asm__ __volatile__("wmb": : :"memory")
-
-#define read_barrier_depends() \
-__asm__ __volatile__("mb": : :"memory")
-
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
-#define smp_read_barrier_depends()	read_barrier_depends()
-#else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	barrier()
-#endif
-
-#define set_mb(var, value) \
-do { var = value; mb(); } while (0)
-
-#define set_wmb(var, value) \
-do { var = value; wmb(); } while (0)
-
 #define imb() \
 __asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
 

+ 17 - 4
include/asm-arm/arch-s3c2410/regs-clock.h

@@ -18,7 +18,9 @@
  *    10-Feb-2005 Ben Dooks	    Fixed CAMDIVN address (Guillaume Gourat)
  *    10-Mar-2005 Lucas Villa Real  Changed S3C2410_VA to S3C24XX_VA
  *    27-Aug-2005 Ben Dooks	    Add clock-slow info
- */
+ *    20-Oct-2005 Ben Dooks	    Fixed overflow in PLL (Guillaume Gourat)
+ *    20-Oct-2005 Ben Dooks	    Add masks for DCLK (Guillaume Gourat)
+*/
 
 #ifndef __ASM_ARM_REGS_CLOCK
 #define __ASM_ARM_REGS_CLOCK "$Id: clock.h,v 1.4 2003/04/30 14:50:51 ben Exp $"
@@ -66,11 +68,16 @@
 #define S3C2410_DCLKCON_DCLK0_UCLK   (1<<1)
 #define S3C2410_DCLKCON_DCLK0_DIV(x) (((x) - 1 )<<4)
 #define S3C2410_DCLKCON_DCLK0_CMP(x) (((x) - 1 )<<8)
+#define S3C2410_DCLKCON_DCLK0_DIV_MASK ((0xf)<<4)
+#define S3C2410_DCLKCON_DCLK0_CMP_MASK ((0xf)<<8)
 
 #define S3C2410_DCLKCON_DCLK1EN	     (1<<16)
 #define S3C2410_DCLKCON_DCLK1_PCLK   (0<<17)
 #define S3C2410_DCLKCON_DCLK1_UCLK   (1<<17)
 #define S3C2410_DCLKCON_DCLK1_DIV(x) (((x) - 1) <<20)
+#define S3C2410_DCLKCON_DCLK1_CMP(x) (((x) - 1) <<24)
+#define S3C2410_DCLKCON_DCLK1_DIV_MASK ((0xf) <<20)
+#define S3C2410_DCLKCON_DCLK1_CMP_MASK ((0xf) <<24)
 
 #define S3C2410_CLKDIVN_PDIVN	     (1<<0)
 #define S3C2410_CLKDIVN_HDIVN	     (1<<1)
@@ -83,10 +90,13 @@
 
 #ifndef __ASSEMBLY__
 
+#include <asm/div64.h>
+
 static inline unsigned int
-s3c2410_get_pll(int pllval, int baseclk)
+s3c2410_get_pll(unsigned int pllval, unsigned int baseclk)
 {
-	int mdiv, pdiv, sdiv;
+	unsigned int mdiv, pdiv, sdiv;
+	uint64_t fvco;
 
 	mdiv = pllval >> S3C2410_PLLCON_MDIVSHIFT;
 	pdiv = pllval >> S3C2410_PLLCON_PDIVSHIFT;
@@ -96,7 +106,10 @@ s3c2410_get_pll(int pllval, int baseclk)
 	pdiv &= S3C2410_PLLCON_PDIVMASK;
 	sdiv &= S3C2410_PLLCON_SDIVMASK;
 
-	return (baseclk * (mdiv + 8)) / ((pdiv + 2) << sdiv);
+	fvco = (uint64_t)baseclk * (mdiv + 8);
+	do_div(fvco, (pdiv + 2) << sdiv);
+
+	return (unsigned int)fvco;
 }
 
 #endif /* __ASSEMBLY__ */
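The overflow being fixed: the PLL output is baseclk * (mdiv + 8) / ((pdiv + 2) << sdiv), and with a 12 MHz input clock the intermediate product already reaches about 3.16e9 at mdiv = 0xff (12,000,000 * 263), which no longer fits in a signed 32-bit int; widening to 64 bits before the divide, as do_div() does above, avoids the wrap. A standalone sketch of the corrected arithmetic (the field values and the 12 MHz figure below are illustrative, not taken from this commit):

    #include <stdint.h>
    #include <stdio.h>

    /* Same arithmetic as the fixed s3c2410_get_pll(): widen to 64 bits
     * before dividing so that baseclk * (mdiv + 8) cannot overflow. */
    static unsigned int get_pll(unsigned int mdiv, unsigned int pdiv,
                                unsigned int sdiv, unsigned int baseclk)
    {
        uint64_t fvco = (uint64_t)baseclk * (mdiv + 8);

        return (unsigned int)(fvco / ((pdiv + 2) << sdiv));
    }

    int main(void)
    {
        /* mdiv = 0xff would have overflowed the old 32-bit signed multiply */
        printf("%u Hz\n", get_pll(0xff, 1, 0, 12000000));
        return 0;
    }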

+ 3 - 13
include/linux/hugetlb.h

@@ -25,6 +25,8 @@ int is_hugepage_mem_enough(size_t);
 unsigned long hugetlb_total_pages(void);
 struct page *alloc_huge_page(void);
 void free_huge_page(struct page *);
+int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, int write_access);
 
 extern unsigned long max_huge_pages;
 extern const unsigned long hugetlb_zero, hugetlb_infinity;
@@ -99,6 +101,7 @@ static inline unsigned long hugetlb_total_pages(void)
 						do { } while (0)
 #define alloc_huge_page()			({ NULL; })
 #define free_huge_page(p)			({ (void)(p); BUG(); })
+#define hugetlb_fault(mm, vma, addr, write)	({ BUG(); 0; })
 
 #ifndef HPAGE_MASK
 #define HPAGE_MASK	0		/* Keep the compiler happy */
@@ -155,24 +158,11 @@ static inline void set_file_hugepages(struct file *file)
 {
 	file->f_op = &hugetlbfs_file_operations;
 }
-
-static inline int valid_hugetlb_file_off(struct vm_area_struct *vma, 
-					  unsigned long address) 
-{
-	struct inode *inode = vma->vm_file->f_dentry->d_inode;
-	loff_t file_off = address - vma->vm_start;
-	
-	file_off += (vma->vm_pgoff << PAGE_SHIFT);
-	
-	return (file_off < inode->i_size);
-}
-
 #else /* !CONFIG_HUGETLBFS */
 
 #define is_file_hugepages(file)		0
 #define set_file_hugepages(file)	BUG()
 #define hugetlb_zero_setup(size)	ERR_PTR(-ENOSYS)
-#define valid_hugetlb_file_off(vma, address) 	0
 
 #endif /* !CONFIG_HUGETLBFS */
 

+ 1 - 0
include/linux/idr.h

@@ -75,4 +75,5 @@ int idr_pre_get(struct idr *idp, unsigned gfp_mask);
 int idr_get_new(struct idr *idp, void *ptr, int *id);
 int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
 void idr_remove(struct idr *idp, int id);
+void idr_destroy(struct idr *idp);
 void idr_init(struct idr *idp);

+ 1 - 1
include/net/ax25.h

@@ -171,7 +171,7 @@ typedef struct {
 	ax25_address		calls[AX25_MAX_DIGIS];
 	unsigned char		repeated[AX25_MAX_DIGIS];
 	unsigned char		ndigi;
-	char			lastrepeat;
+	signed char		lastrepeat;
 } ax25_digi;
 
 typedef struct ax25_route {

+ 3 - 1
include/net/llc_pdu.h

@@ -254,8 +254,10 @@ static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa)
 {
 	if (skb->protocol == ntohs(ETH_P_802_2))
 		memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN);
-	else if (skb->protocol == ntohs(ETH_P_TR_802_2))
+	else if (skb->protocol == ntohs(ETH_P_TR_802_2)) {
 		memcpy(sa, tr_hdr(skb)->saddr, ETH_ALEN);
+		*sa &= 0x7F;
+	}
 }
 
 /**

+ 1 - 8
kernel/exit.c

@@ -825,14 +825,6 @@ fastcall NORET_TYPE void do_exit(long code)
 
 	tsk->flags |= PF_EXITING;
 
-	/*
-	 * Make sure we don't try to process any timer firings
-	 * while we are already exiting.
-	 */
- 	tsk->it_virt_expires = cputime_zero;
- 	tsk->it_prof_expires = cputime_zero;
-	tsk->it_sched_expires = 0;
-
 	if (unlikely(in_atomic()))
 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
 				current->comm, current->pid,
@@ -843,6 +835,7 @@ fastcall NORET_TYPE void do_exit(long code)
 	group_dead = atomic_dec_and_test(&tsk->signal->live);
 	if (group_dead) {
  		del_timer_sync(&tsk->signal->real_timer);
+		exit_itimers(tsk->signal);
 		acct_process(code);
 	}
 	exit_mm(tsk);

+ 67 - 52
kernel/posix-cpu-timers.c

@@ -380,28 +380,31 @@ int posix_cpu_timer_create(struct k_itimer *new_timer)
 int posix_cpu_timer_del(struct k_itimer *timer)
 {
 	struct task_struct *p = timer->it.cpu.task;
+	int ret = 0;
 
-	if (timer->it.cpu.firing)
-		return TIMER_RETRY;
-
-	if (unlikely(p == NULL))
-		return 0;
+	if (likely(p != NULL)) {
+		read_lock(&tasklist_lock);
+		if (unlikely(p->signal == NULL)) {
+			/*
+			 * We raced with the reaping of the task.
+			 * The deletion should have cleared us off the list.
+			 */
+			BUG_ON(!list_empty(&timer->it.cpu.entry));
+		} else {
+			spin_lock(&p->sighand->siglock);
+			if (timer->it.cpu.firing)
+				ret = TIMER_RETRY;
+			else
+				list_del(&timer->it.cpu.entry);
+			spin_unlock(&p->sighand->siglock);
+		}
+		read_unlock(&tasklist_lock);
 
-	spin_lock(&p->sighand->siglock);
-	if (!list_empty(&timer->it.cpu.entry)) {
-		/*
-		 * Take us off the task's timer list.  We don't need to
-		 * take tasklist_lock and check for the task being reaped.
-		 * If it was reaped, it already called posix_cpu_timers_exit
-		 * and posix_cpu_timers_exit_group to clear all the timers
-		 * that pointed to it.
-		 */
-		list_del(&timer->it.cpu.entry);
-		put_task_struct(p);
+		if (!ret)
+			put_task_struct(p);
 	}
-	spin_unlock(&p->sighand->siglock);
 
-	return 0;
+	return ret;
 }
 
 /*
@@ -418,8 +421,6 @@ static void cleanup_timers(struct list_head *head,
 	cputime_t ptime = cputime_add(utime, stime);
 
 	list_for_each_entry_safe(timer, next, head, entry) {
-		put_task_struct(timer->task);
-		timer->task = NULL;
 		list_del_init(&timer->entry);
 		if (cputime_lt(timer->expires.cpu, ptime)) {
 			timer->expires.cpu = cputime_zero;
@@ -431,8 +432,6 @@ static void cleanup_timers(struct list_head *head,
 
 	++head;
 	list_for_each_entry_safe(timer, next, head, entry) {
-		put_task_struct(timer->task);
-		timer->task = NULL;
 		list_del_init(&timer->entry);
 		if (cputime_lt(timer->expires.cpu, utime)) {
 			timer->expires.cpu = cputime_zero;
@@ -444,8 +443,6 @@ static void cleanup_timers(struct list_head *head,
 
 	++head;
 	list_for_each_entry_safe(timer, next, head, entry) {
-		put_task_struct(timer->task);
-		timer->task = NULL;
 		list_del_init(&timer->entry);
 		if (timer->expires.sched < sched_time) {
 			timer->expires.sched = 0;
@@ -489,6 +486,9 @@ static void process_timer_rebalance(struct task_struct *p,
  	struct task_struct *t = p;
 	unsigned int nthreads = atomic_read(&p->signal->live);
 
+	if (!nthreads)
+		return;
+
 	switch (clock_idx) {
 	default:
 		BUG();
@@ -730,9 +730,15 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 	 * Disarm any old timer after extracting its expiry time.
 	 */
 	BUG_ON(!irqs_disabled());
+
+	ret = 0;
 	spin_lock(&p->sighand->siglock);
 	old_expires = timer->it.cpu.expires;
-	list_del_init(&timer->it.cpu.entry);
+	if (unlikely(timer->it.cpu.firing)) {
+		timer->it.cpu.firing = -1;
+		ret = TIMER_RETRY;
+	} else
+		list_del_init(&timer->it.cpu.entry);
 	spin_unlock(&p->sighand->siglock);
 
 	/*
@@ -780,7 +786,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		}
 	}
 
-	if (unlikely(timer->it.cpu.firing)) {
+	if (unlikely(ret)) {
 		/*
 		 * We are colliding with the timer actually firing.
 		 * Punt after filling in the timer's old value, and
@@ -788,8 +794,6 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		 * it as an overrun (thanks to bump_cpu_timer above).
 		 */
 		read_unlock(&tasklist_lock);
-		timer->it.cpu.firing = -1;
-		ret = TIMER_RETRY;
 		goto out;
 	}
 
@@ -955,14 +959,16 @@ void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 static void check_thread_timers(struct task_struct *tsk,
 				struct list_head *firing)
 {
+	int maxfire;
 	struct list_head *timers = tsk->cpu_timers;
 
+	maxfire = 20;
 	tsk->it_prof_expires = cputime_zero;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *t = list_entry(timers->next,
 						      struct cpu_timer_list,
 						      entry);
-		if (cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
+		if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
 			tsk->it_prof_expires = t->expires.cpu;
 			break;
 		}
@@ -971,12 +977,13 @@ static void check_thread_timers(struct task_struct *tsk,
 	}
 
 	++timers;
+	maxfire = 20;
 	tsk->it_virt_expires = cputime_zero;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *t = list_entry(timers->next,
 						      struct cpu_timer_list,
 						      entry);
-		if (cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
+		if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
 			tsk->it_virt_expires = t->expires.cpu;
 			break;
 		}
@@ -985,12 +992,13 @@ static void check_thread_timers(struct task_struct *tsk,
 	}
 
 	++timers;
+	maxfire = 20;
 	tsk->it_sched_expires = 0;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *t = list_entry(timers->next,
 						      struct cpu_timer_list,
 						      entry);
-		if (tsk->sched_time < t->expires.sched) {
+		if (!--maxfire || tsk->sched_time < t->expires.sched) {
 			tsk->it_sched_expires = t->expires.sched;
 			break;
 		}
@@ -1007,6 +1015,7 @@ static void check_thread_timers(struct task_struct *tsk,
 static void check_process_timers(struct task_struct *tsk,
 				 struct list_head *firing)
 {
+	int maxfire;
 	struct signal_struct *const sig = tsk->signal;
 	cputime_t utime, stime, ptime, virt_expires, prof_expires;
 	unsigned long long sched_time, sched_expires;
@@ -1039,12 +1048,13 @@ static void check_process_timers(struct task_struct *tsk,
 	} while (t != tsk);
 	ptime = cputime_add(utime, stime);
 
+	maxfire = 20;
 	prof_expires = cputime_zero;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *t = list_entry(timers->next,
 						      struct cpu_timer_list,
 						      entry);
-		if (cputime_lt(ptime, t->expires.cpu)) {
+		if (!--maxfire || cputime_lt(ptime, t->expires.cpu)) {
 			prof_expires = t->expires.cpu;
 			break;
 		}
@@ -1053,12 +1063,13 @@ static void check_process_timers(struct task_struct *tsk,
 	}
 
 	++timers;
+	maxfire = 20;
 	virt_expires = cputime_zero;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *t = list_entry(timers->next,
 						      struct cpu_timer_list,
 						      entry);
-		if (cputime_lt(utime, t->expires.cpu)) {
+		if (!--maxfire || cputime_lt(utime, t->expires.cpu)) {
 			virt_expires = t->expires.cpu;
 			break;
 		}
@@ -1067,12 +1078,13 @@ static void check_process_timers(struct task_struct *tsk,
 	}
 
 	++timers;
+	maxfire = 20;
 	sched_expires = 0;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *t = list_entry(timers->next,
 						      struct cpu_timer_list,
 						      entry);
-		if (sched_time < t->expires.sched) {
+		if (!--maxfire || sched_time < t->expires.sched) {
 			sched_expires = t->expires.sched;
 			break;
 		}
@@ -1155,6 +1167,9 @@ static void check_process_timers(struct task_struct *tsk,
 		unsigned long long sched_left, sched;
 		const unsigned int nthreads = atomic_read(&sig->live);
 
+		if (!nthreads)
+			return;
+
 		prof_left = cputime_sub(prof_expires, utime);
 		prof_left = cputime_sub(prof_left, stime);
 		prof_left = cputime_div(prof_left, nthreads);
@@ -1280,30 +1295,30 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 
 #undef	UNEXPIRED
 
-	BUG_ON(tsk->exit_state);
-
 	/*
 	 * Double-check with locks held.
 	 */
 	read_lock(&tasklist_lock);
-	spin_lock(&tsk->sighand->siglock);
+	if (likely(tsk->signal != NULL)) {
+		spin_lock(&tsk->sighand->siglock);
 
-	/*
-	 * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
-	 * all the timers that are firing, and put them on the firing list.
-	 */
-	check_thread_timers(tsk, &firing);
-	check_process_timers(tsk, &firing);
+		/*
+		 * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
+		 * all the timers that are firing, and put them on the firing list.
+		 */
+		check_thread_timers(tsk, &firing);
+		check_process_timers(tsk, &firing);
 
-	/*
-	 * We must release these locks before taking any timer's lock.
-	 * There is a potential race with timer deletion here, as the
-	 * siglock now protects our private firing list.  We have set
-	 * the firing flag in each timer, so that a deletion attempt
-	 * that gets the timer lock before we do will give it up and
-	 * spin until we've taken care of that timer below.
-	 */
-	spin_unlock(&tsk->sighand->siglock);
+		/*
+		 * We must release these locks before taking any timer's lock.
+		 * There is a potential race with timer deletion here, as the
+		 * siglock now protects our private firing list.  We have set
+		 * the firing flag in each timer, so that a deletion attempt
+		 * that gets the timer lock before we do will give it up and
+		 * spin until we've taken care of that timer below.
+		 */
+		spin_unlock(&tsk->sighand->siglock);
+	}
 	read_unlock(&tasklist_lock);
 
 	/*

+ 1 - 1
kernel/posix-timers.c

@@ -1157,7 +1157,7 @@ retry_delete:
 }
 
 /*
- * This is called by __exit_signal, only when there are no more
+ * This is called by do_exit or de_thread, only when there are no more
  * references to the shared signal_struct.
  */
 void exit_itimers(struct signal_struct *sig)

+ 1 - 13
kernel/signal.c

@@ -397,20 +397,8 @@ void __exit_signal(struct task_struct *tsk)
 	flush_sigqueue(&tsk->pending);
 	if (sig) {
 		/*
-		 * We are cleaning up the signal_struct here.  We delayed
-		 * calling exit_itimers until after flush_sigqueue, just in
-		 * case our thread-local pending queue contained a queued
-		 * timer signal that would have been cleared in
-		 * exit_itimers.  When that called sigqueue_free, it would
-		 * attempt to re-take the tasklist_lock and deadlock.  This
-		 * can never happen if we ensure that all queues the
-		 * timer's signal might be queued on have been flushed
-		 * first.  The shared_pending queue, and our own pending
-		 * queue are the only queues the timer could be on, since
-		 * there are no other threads left in the group and timer
-		 * signals are constrained to threads inside the group.
+		 * We are cleaning up the signal_struct here.
 		 */
-		exit_itimers(sig);
 		exit_thread_group_keys(sig);
 		kmem_cache_free(signal_cachep, sig);
 	}

+ 13 - 0
lib/idr.c

@@ -345,6 +345,19 @@ void idr_remove(struct idr *idp, int id)
 }
 EXPORT_SYMBOL(idr_remove);
 
+/**
+ * idr_destroy - release all cached layers within an idr tree
+ * idp: idr handle
+ */
+void idr_destroy(struct idr *idp)
+{
+	while (idp->id_free_cnt) {
+		struct idr_layer *p = alloc_layer(idp);
+		kmem_cache_free(idr_layer_cache, p);
+	}
+}
+EXPORT_SYMBOL(idr_destroy);
+
 /**
  * idr_find - return pointer for given id
  * @idp: idr handle
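Note that idr_destroy() above only frees the layers cached on the idr's free list by idr_pre_get(); ids still in use have to be dropped with idr_remove() first. A minimal kernel-style sketch of the lifecycle the inotify fix relies on (illustrative only; example_idr, setup_and_teardown() and my_object are hypothetical names, not from this commit):

    static struct idr example_idr;

    static int setup_and_teardown(void *my_object)
    {
        int id;

        idr_init(&example_idr);

        /* allocation is two-step in this API: preallocate a layer
         * (may sleep), then take an id for the object */
        if (!idr_pre_get(&example_idr, GFP_KERNEL))
            return -ENOMEM;
        if (idr_get_new(&example_idr, my_object, &id))
            return -EAGAIN;

        /* ... idr_find(&example_idr, id) returns my_object ... */

        /* teardown: drop live ids first, then release the cached layers */
        idr_remove(&example_idr, id);
        idr_destroy(&example_idr);
        return 0;
    }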

+ 22 - 0
mm/hugetlb.c

@@ -394,6 +394,28 @@ out:
 	return ret;
 }
 
+/*
+ * On ia64 at least, it is possible to receive a hugetlb fault from a
+ * stale zero entry left in the TLB from earlier hardware prefetching.
+ * Low-level arch code should already have flushed the stale entry as
+ * part of its fault handling, but we do need to accept this minor fault
+ * and return successfully.  Whereas the "normal" case is that this is
+ * an access to a hugetlb page which has been truncated off since mmap.
+ */
+int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, int write_access)
+{
+	int ret = VM_FAULT_SIGBUS;
+	pte_t *pte;
+
+	spin_lock(&mm->page_table_lock);
+	pte = huge_pte_offset(mm, address);
+	if (pte && !pte_none(*pte))
+		ret = VM_FAULT_MINOR;
+	spin_unlock(&mm->page_table_lock);
+	return ret;
+}
+
 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			struct page **pages, struct vm_area_struct **vmas,
 			unsigned long *position, int *length, int i)

+ 2 - 12
mm/memory.c

@@ -2045,18 +2045,8 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
 
 	inc_page_state(pgfault);
 
-	if (unlikely(is_vm_hugetlb_page(vma))) {
-		if (valid_hugetlb_file_off(vma, address))
-			/* We get here only if there was a stale(zero) TLB entry 
-			 * (because of  HW prefetching). 
-			 * Low-level arch code (if needed) should have already
-			 * purged the stale entry as part of this fault handling.  
-			 * Here we just return.
-			 */
-			return VM_FAULT_MINOR; 
-		else
-			return VM_FAULT_SIGBUS;	/* mapping truncation does this. */
-	}
+	if (unlikely(is_vm_hugetlb_page(vma)))
+		return hugetlb_fault(mm, vma, address, write_access);
 
 	/*
 	 * We need the page table lock to synchronize with kswapd

+ 3 - 2
net/802/tr.c

@@ -340,9 +340,10 @@ static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev)
 	unsigned int hash, rii_p = 0;
 	unsigned long flags;
 	struct rif_cache *entry;
-
+	unsigned char saddr0;
 
 	spin_lock_irqsave(&rif_lock, flags);
+	saddr0 = trh->saddr[0];
 	
 	/*
 	 *	Firstly see if the entry exists
@@ -395,7 +396,6 @@ printk("adding rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
 			entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK);
 			memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short));
 			entry->local_ring = 0;
-			trh->saddr[0]|=TR_RII; /* put the routing indicator back for tcpdump */
 		}
 		else
 		{
@@ -422,6 +422,7 @@ printk("updating rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
 		    }                                         
            	entry->last_used=jiffies;               
 	}
+	trh->saddr[0]=saddr0; /* put the routing indicator back for tcpdump */
 	spin_unlock_irqrestore(&rif_lock, flags);
 }
 

+ 16 - 32
net/core/neighbour.c

@@ -175,39 +175,10 @@ static void pneigh_queue_purge(struct sk_buff_head *list)
 	}
 }
 
-void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
+static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
 {
 	int i;
 
-	write_lock_bh(&tbl->lock);
-
-	for (i=0; i <= tbl->hash_mask; i++) {
-		struct neighbour *n, **np;
-
-		np = &tbl->hash_buckets[i];
-		while ((n = *np) != NULL) {
-			if (dev && n->dev != dev) {
-				np = &n->next;
-				continue;
-			}
-			*np = n->next;
-			write_lock_bh(&n->lock);
-			n->dead = 1;
-			neigh_del_timer(n);
-			write_unlock_bh(&n->lock);
-			neigh_release(n);
-		}
-	}
-
-        write_unlock_bh(&tbl->lock);
-}
-
-int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
-{
-	int i;
-
-	write_lock_bh(&tbl->lock);
-
 	for (i = 0; i <= tbl->hash_mask; i++) {
 		struct neighbour *n, **np = &tbl->hash_buckets[i];
 
@@ -243,7 +214,19 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
 			neigh_release(n);
 		}
 	}
+}
 
+void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
+{
+	write_lock_bh(&tbl->lock);
+	neigh_flush_dev(tbl, dev);
+	write_unlock_bh(&tbl->lock);
+}
+
+int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
+{
+	write_lock_bh(&tbl->lock);
+	neigh_flush_dev(tbl, dev);
 	pneigh_ifdown(tbl, dev);
 	write_unlock_bh(&tbl->lock);
 
@@ -732,6 +715,7 @@ static inline void neigh_add_timer(struct neighbour *n, unsigned long when)
 	if (unlikely(mod_timer(&n->timer, when))) {
 		printk("NEIGH: BUG, double timer add, state is %x\n",
 		       n->nud_state);
+		dump_stack();
 	}
 }
 
@@ -815,10 +799,10 @@ static void neigh_timer_handler(unsigned long arg)
 	}
 
 	if (neigh->nud_state & NUD_IN_TIMER) {
-		neigh_hold(neigh);
 		if (time_before(next, jiffies + HZ/2))
 			next = jiffies + HZ/2;
-		neigh_add_timer(neigh, next);
+		if (!mod_timer(&neigh->timer, next))
+			neigh_hold(neigh);
 	}
 	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
 		struct sk_buff *skb = skb_peek(&neigh->arp_queue);

+ 6 - 0
net/core/skbuff.c

@@ -410,6 +410,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 	C(nfct);
 	nf_conntrack_get(skb->nfct);
 	C(nfctinfo);
+#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
+	C(ipvs_property);
+#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
 	C(nf_bridge);
 	nf_bridge_get(skb->nf_bridge);
@@ -467,6 +470,9 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	new->nfct	= old->nfct;
 	nf_conntrack_get(old->nfct);
 	new->nfctinfo	= old->nfctinfo;
+#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
+	new->ipvs_property = old->ipvs_property;
+#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
 	new->nf_bridge	= old->nf_bridge;
 	nf_bridge_get(old->nf_bridge);

+ 2 - 0
net/dccp/ipv4.c

@@ -463,6 +463,7 @@ static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
 	if (skb != NULL) {
 		const struct inet_request_sock *ireq = inet_rsk(req);
 
+		memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
 					    ireq->rmt_addr,
 					    ireq->opt);
@@ -647,6 +648,7 @@ int dccp_v4_send_reset(struct sock *sk, enum dccp_reset_codes code)
 	if (skb != NULL) {
 		const struct inet_sock *inet = inet_sk(sk);
 
+		memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 		err = ip_build_and_send_pkt(skb, sk,
 					    inet->saddr, inet->daddr, NULL);
 		if (err == NET_XMIT_CN)

+ 5 - 5
net/dccp/output.c

@@ -62,10 +62,8 @@ int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 		
 		skb->h.raw = skb_push(skb, dccp_header_size);
 		dh = dccp_hdr(skb);
-		/*
-		 * Data packets are not cloned as they are never retransmitted
-		 */
-		if (skb_cloned(skb))
+
+		if (!skb->sk)
 			skb_set_owner_w(skb, sk);
 
 		/* Build DCCP header and checksum it. */
@@ -102,6 +100,7 @@ int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 
 		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
 
+		memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 		err = ip_queue_xmit(skb, 0);
 		if (err <= 0)
 			return err;
@@ -243,7 +242,8 @@ int dccp_write_xmit(struct sock *sk, struct sk_buff *skb, long *timeo)
 
 		err = dccp_transmit_skb(sk, skb);
 		ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
-	}
+	} else
+		kfree_skb(skb);
 
 	return err;
 }

+ 0 - 2
net/dccp/proto.c

@@ -402,8 +402,6 @@ int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	 *     This bug was _quickly_ found & fixed by just looking at an OSTRA
 	 *     generated callgraph 8) -acme
 	 */
-	if (rc != 0)
-		goto out_discard;
 out_release:
 	release_sock(sk);
 	return rc ? : len;

+ 3 - 0
net/ipv4/ip_output.c

@@ -391,6 +391,9 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
 	to->nfct = from->nfct;
 	nf_conntrack_get(to->nfct);
 	to->nfctinfo = from->nfctinfo;
+#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
+	to->ipvs_property = from->ipvs_property;
+#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
 	nf_bridge_put(to->nf_bridge);
 	to->nf_bridge = from->nf_bridge;

+ 1 - 11
net/ipv4/tcp_output.c

@@ -435,17 +435,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
 	int nsize, old_factor;
 	u16 flags;
 
-	if (unlikely(len >= skb->len)) {
-		if (net_ratelimit()) {
-			printk(KERN_DEBUG "TCP: seg_size=%u, mss=%u, seq=%u, "
-			       "end_seq=%u, skb->len=%u.\n", len, mss_now,
-			       TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
-			       skb->len);
-			WARN_ON(1);
-		}
-		return 0;
-	}
-
+	BUG_ON(len > skb->len);
 	nsize = skb_headlen(skb) - len;
 	if (nsize < 0)
 		nsize = 0;

+ 1 - 1
net/ipv6/ip6_flowlabel.c

@@ -483,7 +483,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
 						goto done;
 					}
 					fl1 = sfl->fl;
-					atomic_inc(&fl->users);
+					atomic_inc(&fl1->users);
 					break;
 				}
 			}

+ 3 - 1
security/selinux/selinuxfs.c

@@ -879,7 +879,7 @@ static ssize_t sel_commit_bools_write(struct file *filep,
 	if (sscanf(page, "%d", &new_value) != 1)
 		goto out;
 
-	if (new_value) {
+	if (new_value && bool_pending_values) {
 		security_set_bools(bool_num, bool_pending_values);
 	}
 
@@ -952,6 +952,7 @@ static int sel_make_bools(void)
 
 	/* remove any existing files */
 	kfree(bool_pending_values);
+	bool_pending_values = NULL;
 
 	sel_remove_bools(dir);
 
@@ -1002,6 +1003,7 @@ out:
 	}
 	return ret;
 err:
+	kfree(values);
 	d_genocide(dir);
 	ret = -ENOMEM;
 	goto out;

+ 4 - 2
security/selinux/ss/policydb.c

@@ -650,8 +650,10 @@ void policydb_destroy(struct policydb *p)
 	}
 	if (lrt) kfree(lrt);
 
-	for (i = 0; i < p->p_types.nprim; i++)
-		ebitmap_destroy(&p->type_attr_map[i]);
+	if (p->type_attr_map) {
+		for (i = 0; i < p->p_types.nprim; i++)
+			ebitmap_destroy(&p->type_attr_map[i]);
+	}
 	kfree(p->type_attr_map);
 
 	return;

+ 4 - 2
sound/core/init.c

@@ -828,7 +828,8 @@ static int snd_generic_suspend(struct device *dev, pm_message_t state, u32 level
 	card = get_snd_generic_card(dev);
 	if (card->power_state == SNDRV_CTL_POWER_D3hot)
 		return 0;
-	card->pm_suspend(card, PMSG_SUSPEND);
+	if (card->pm_suspend)
+		card->pm_suspend(card, PMSG_SUSPEND);
 	snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
 	return 0;
 }
@@ -843,7 +844,8 @@ static int snd_generic_resume(struct device *dev, u32 level)
 	card = get_snd_generic_card(dev);
 	if (card->power_state == SNDRV_CTL_POWER_D0)
 		return 0;
-	card->pm_resume(card);
+	if (card->pm_suspend)
+		card->pm_resume(card);
 	snd_power_change_state(card, SNDRV_CTL_POWER_D0);
 	return 0;
 }

Too many files changed in this diff, so some files are not shown