Browse source

Merge branches 'cxgb3', 'endian', 'ipath', 'ipoib', 'iser', 'mad', 'misc', 'mlx4', 'mthca', 'nes' and 'sysfs' into for-next

50 changed files with 1137 additions and 621 deletions
  1. drivers/infiniband/core/cm.c (+7 -8)
  2. drivers/infiniband/core/cm_msgs.h (+11 -11)
  3. drivers/infiniband/core/device.c (+3 -1)
  4. drivers/infiniband/core/mad.c (+20 -20)
  5. drivers/infiniband/core/mad_rmpp.c (+1 -1)
  6. drivers/infiniband/core/sa_query.c (+2 -0)
  7. drivers/infiniband/core/sysfs.c (+3 -16)
  8. drivers/infiniband/hw/cxgb3/iwch_qp.c (+2 -2)
  9. drivers/infiniband/hw/ehca/ehca_sqp.c (+4 -4)
  10. drivers/infiniband/hw/ipath/ipath_eeprom.c (+2 -2)
  11. drivers/infiniband/hw/ipath/ipath_init_chip.c (+1 -1)
  12. drivers/infiniband/hw/ipath/ipath_mad.c (+47 -48)
  13. drivers/infiniband/hw/ipath/ipath_rc.c (+1 -1)
  14. drivers/infiniband/hw/ipath/ipath_sdma.c (+2 -2)
  15. drivers/infiniband/hw/ipath/ipath_uc.c (+1 -1)
  16. drivers/infiniband/hw/ipath/ipath_ud.c (+2 -2)
  17. drivers/infiniband/hw/ipath/ipath_user_pages.c (+4 -4)
  18. drivers/infiniband/hw/ipath/ipath_user_sdma.c (+3 -3)
  19. drivers/infiniband/hw/ipath/ipath_verbs.c (+1 -1)
  20. drivers/infiniband/hw/ipath/ipath_verbs.h (+5 -5)
  21. drivers/infiniband/hw/mlx4/mad.c (+20 -7)
  22. drivers/infiniband/hw/mlx4/main.c (+3 -2)
  23. drivers/infiniband/hw/mlx4/qp.c (+11 -11)
  24. drivers/infiniband/hw/mthca/mthca_mad.c (+19 -6)
  25. drivers/infiniband/hw/nes/nes.c (+1 -1)
  26. drivers/infiniband/hw/nes/nes.h (+1 -1)
  27. drivers/infiniband/hw/nes/nes_cm.c (+385 -193)
  28. drivers/infiniband/hw/nes/nes_cm.h (+8 -4)
  29. drivers/infiniband/hw/nes/nes_context.h (+1 -1)
  30. drivers/infiniband/hw/nes/nes_hw.c (+13 -4)
  31. drivers/infiniband/hw/nes/nes_hw.h (+2 -3)
  32. drivers/infiniband/hw/nes/nes_nic.c (+62 -80)
  33. drivers/infiniband/hw/nes/nes_user.h (+1 -1)
  34. drivers/infiniband/hw/nes/nes_utils.c (+1 -1)
  35. drivers/infiniband/hw/nes/nes_verbs.c (+170 -79)
  36. drivers/infiniband/hw/nes/nes_verbs.h (+1 -1)
  37. drivers/infiniband/ulp/ipoib/ipoib_main.c (+7 -2)
  38. drivers/infiniband/ulp/iser/iser_verbs.c (+0 -7)
  39. drivers/net/mlx4/Makefile (+1 -1)
  40. drivers/net/mlx4/catas.c (+2 -14)
  41. drivers/net/mlx4/eq.c (+11 -5)
  42. drivers/net/mlx4/main.c (+79 -27)
  43. drivers/net/mlx4/mlx4.h (+25 -2)
  44. drivers/net/mlx4/port.c (+5 -8)
  45. drivers/net/mlx4/sense.c (+156 -0)
  46. include/linux/mlx4/cmd.h (+1 -0)
  47. include/linux/mlx4/device.h (+4 -2)
  48. include/rdma/ib_cm.h (+6 -6)
  49. include/rdma/ib_mad.h (+2 -2)
  50. include/rdma/ib_smi.h (+17 -17)

+ 7 - 8
drivers/infiniband/core/cm.c

@@ -927,8 +927,7 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
 	unsigned long flags;
 	int ret = 0;

-	service_mask = service_mask ? service_mask :
-		       __constant_cpu_to_be64(~0ULL);
+	service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
 	service_id &= service_mask;
 	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
 	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
@@ -954,7 +953,7 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
 	spin_lock_irqsave(&cm.lock, flags);
 	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
 		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
-		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
+		cm_id->service_mask = ~cpu_to_be64(0);
 	} else {
 		cm_id->service_id = service_id;
 		cm_id->service_mask = service_mask;
@@ -1134,7 +1133,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
 			goto error1;
 	}
 	cm_id->service_id = param->service_id;
-	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
+	cm_id->service_mask = ~cpu_to_be64(0);
 	cm_id_priv->timeout_ms = cm_convert_to_ms(
 				    param->primary_path->packet_life_time) * 2 +
 				 cm_convert_to_ms(
@@ -1545,7 +1544,7 @@ static int cm_req_handler(struct cm_work *work)
 	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
 	cm_id_priv->id.context = listen_cm_id_priv->id.context;
 	cm_id_priv->id.service_id = req_msg->service_id;
-	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
+	cm_id_priv->id.service_mask = ~cpu_to_be64(0);

 	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
 	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
@@ -2898,7 +2897,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
 		goto out;

 	cm_id->service_id = param->service_id;
-	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
+	cm_id->service_mask = ~cpu_to_be64(0);
 	cm_id_priv->timeout_ms = param->timeout_ms;
 	cm_id_priv->max_cm_retries = param->max_cm_retries;
 	ret = cm_alloc_msg(cm_id_priv, &msg);
@@ -2992,7 +2991,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
 	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
 	cm_id_priv->id.context = cur_cm_id_priv->id.context;
 	cm_id_priv->id.service_id = sidr_req_msg->service_id;
-	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
+	cm_id_priv->id.service_mask = ~cpu_to_be64(0);

 	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
 	cm_process_work(cm_id_priv, work);
@@ -3789,7 +3788,7 @@ static int __init ib_cm_init(void)
 	rwlock_init(&cm.device_lock);
 	spin_lock_init(&cm.lock);
 	cm.listen_service_table = RB_ROOT;
-	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
+	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
 	cm.remote_id_table = RB_ROOT;
 	cm.remote_qp_table = RB_ROOT;
 	cm.remote_sidr_table = RB_ROOT;
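
Note: the `~cpu_to_be64(0)` rewrite works because an all-ones bit pattern looks the same in every byte order, so complementing the converted zero gives the same bits as converting `~0ULL`; it also drops the `__constant_` spelling now that the plain helpers constant-fold. A minimal userspace sketch checking the identity (glibc's htobe64 from <endian.h> is assumed as a stand-in for the kernel helper):

#include <assert.h>
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define cpu_to_be64(x) htobe64(x)   /* stand-in for the kernel helper */

int main(void)
{
	uint64_t a = ~cpu_to_be64(0);        /* new style in the diff */
	uint64_t b = cpu_to_be64(~0ULL);     /* old __constant_ style */

	/* All-ones is byte-order invariant, so the two must agree. */
	assert(a == b && a == UINT64_MAX);
	printf("0x%016llx\n", (unsigned long long)a);
	return 0;
}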

+ 11 - 11
drivers/infiniband/core/cm_msgs.h

@@ -44,17 +44,17 @@

 #define IB_CM_CLASS_VERSION	2 /* IB specification 1.2 */

-#define CM_REQ_ATTR_ID	    __constant_htons(0x0010)
-#define CM_MRA_ATTR_ID	    __constant_htons(0x0011)
-#define CM_REJ_ATTR_ID	    __constant_htons(0x0012)
-#define CM_REP_ATTR_ID	    __constant_htons(0x0013)
-#define CM_RTU_ATTR_ID	    __constant_htons(0x0014)
-#define CM_DREQ_ATTR_ID	    __constant_htons(0x0015)
-#define CM_DREP_ATTR_ID	    __constant_htons(0x0016)
-#define CM_SIDR_REQ_ATTR_ID __constant_htons(0x0017)
-#define CM_SIDR_REP_ATTR_ID __constant_htons(0x0018)
-#define CM_LAP_ATTR_ID      __constant_htons(0x0019)
-#define CM_APR_ATTR_ID      __constant_htons(0x001A)
+#define CM_REQ_ATTR_ID		cpu_to_be16(0x0010)
+#define CM_MRA_ATTR_ID		cpu_to_be16(0x0011)
+#define CM_REJ_ATTR_ID		cpu_to_be16(0x0012)
+#define CM_REP_ATTR_ID		cpu_to_be16(0x0013)
+#define CM_RTU_ATTR_ID		cpu_to_be16(0x0014)
+#define CM_DREQ_ATTR_ID		cpu_to_be16(0x0015)
+#define CM_DREP_ATTR_ID		cpu_to_be16(0x0016)
+#define CM_SIDR_REQ_ATTR_ID	cpu_to_be16(0x0017)
+#define CM_SIDR_REP_ATTR_ID	cpu_to_be16(0x0018)
+#define CM_LAP_ATTR_ID		cpu_to_be16(0x0019)
+#define CM_APR_ATTR_ID		cpu_to_be16(0x001A)

 enum cm_msg_sequence {
 	CM_MSG_SEQUENCE_REQ,
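
Note: the point of moving the attribute IDs from `__constant_htons` to `cpu_to_be16` is that the kernel's `cpu_to_be16` is itself an integer constant expression, so one spelling now works in static initializers and case labels as well as at runtime. A rough userspace analogue (the hand-rolled swap macro below assumes a little-endian host; the kernel version dispatches on byte order):

#include <stdint.h>
#include <stdio.h>

/* Constant-expression swap; little-endian host assumed for this sketch. */
#define cpu_to_be16(x) ((uint16_t)((((uint16_t)(x) & 0x00ffU) << 8) | \
				   (((uint16_t)(x) & 0xff00U) >> 8)))

#define CM_REQ_ATTR_ID cpu_to_be16(0x0010)
#define CM_REP_ATTR_ID cpu_to_be16(0x0013)

/* Legal only because the macro folds to a constant: */
static const uint16_t known_ids[] = { CM_REQ_ATTR_ID, CM_REP_ATTR_ID };

static const char *id_name(uint16_t id)
{
	switch (id) {
	case CM_REQ_ATTR_ID: return "REQ";   /* constant case label */
	case CM_REP_ATTR_ID: return "REP";
	default:             return "other";
	}
}

int main(void)
{
	printf("%s %s\n", id_name(known_ids[0]), id_name(known_ids[1]));
	return 0;
}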

+ 3 - 1
drivers/infiniband/core/device.c

@@ -193,7 +193,7 @@ void ib_dealloc_device(struct ib_device *device)

 	BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);

-	ib_device_unregister_sysfs(device);
+	kobject_put(&device->dev.kobj);
 }
 EXPORT_SYMBOL(ib_dealloc_device);

@@ -348,6 +348,8 @@ void ib_unregister_device(struct ib_device *device)

 	mutex_unlock(&device_mutex);

+	ib_device_unregister_sysfs(device);
+
 	spin_lock_irqsave(&device->client_data_lock, flags);
 	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
 		kfree(context);

+ 20 - 20
drivers/infiniband/core/mad.c

@@ -301,6 +301,16 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	mad_agent_priv->agent.context = context;
 	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
 	mad_agent_priv->agent.port_num = port_num;
+	spin_lock_init(&mad_agent_priv->lock);
+	INIT_LIST_HEAD(&mad_agent_priv->send_list);
+	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
+	INIT_LIST_HEAD(&mad_agent_priv->done_list);
+	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
+	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
+	INIT_LIST_HEAD(&mad_agent_priv->local_list);
+	INIT_WORK(&mad_agent_priv->local_work, local_completions);
+	atomic_set(&mad_agent_priv->refcount, 1);
+	init_completion(&mad_agent_priv->comp);

 	spin_lock_irqsave(&port_priv->reg_lock, flags);
 	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
@@ -350,17 +360,6 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
 	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

-	spin_lock_init(&mad_agent_priv->lock);
-	INIT_LIST_HEAD(&mad_agent_priv->send_list);
-	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
-	INIT_LIST_HEAD(&mad_agent_priv->done_list);
-	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
-	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
-	INIT_LIST_HEAD(&mad_agent_priv->local_list);
-	INIT_WORK(&mad_agent_priv->local_work, local_completions);
-	atomic_set(&mad_agent_priv->refcount, 1);
-	init_completion(&mad_agent_priv->comp);
-
 	return &mad_agent_priv->agent;

 error4:
@@ -743,9 +742,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 		break;
 	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
 		kmem_cache_free(ib_mad_cache, mad_priv);
-		kfree(local);
-		ret = 1;
-		goto out;
+		break;
 	case IB_MAD_RESULT_SUCCESS:
 		/* Treat like an incoming receive MAD */
 		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
@@ -756,10 +753,12 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 						        &mad_priv->mad.mad);
 		}
 		if (!port_priv || !recv_mad_agent) {
+			/*
+			 * No receiving agent so drop packet and
+			 * generate send completion.
+			 */
 			kmem_cache_free(ib_mad_cache, mad_priv);
-			kfree(local);
-			ret = 0;
-			goto out;
+			break;
 		}
 		local->mad_priv = mad_priv;
 		local->recv_mad_agent = recv_mad_agent;
@@ -2356,7 +2355,7 @@ static void local_completions(struct work_struct *work)
 	struct ib_mad_local_private *local;
 	struct ib_mad_agent_private *recv_mad_agent;
 	unsigned long flags;
-	int recv = 0;
+	int free_mad;
 	struct ib_wc wc;
 	struct ib_mad_send_wc mad_send_wc;

@@ -2370,14 +2369,15 @@ static void local_completions(struct work_struct *work)
 				   completion_list);
 		list_del(&local->completion_list);
 		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+		free_mad = 0;
 		if (local->mad_priv) {
 			recv_mad_agent = local->recv_mad_agent;
 			if (!recv_mad_agent) {
 				printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
+				free_mad = 1;
 				goto local_send_completion;
 			}

-			recv = 1;
 			/*
 			 * Defined behavior is to complete response
 			 * before request
@@ -2422,7 +2422,7 @@ local_send_completion:

 		spin_lock_irqsave(&mad_agent_priv->lock, flags);
 		atomic_dec(&mad_agent_priv->refcount);
-		if (!recv)
+		if (free_mad)
 			kmem_cache_free(ib_mad_cache, local->mad_priv);
 		kfree(local);
 	}
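
Note: the first two mad.c hunks move all field and list initialization of the new agent to before the point where it is linked onto port_priv->agent_list under reg_lock; once the agent is on that list another context can find it, so publishing a half-initialized structure would be a race. A small sketch of the publish-last pattern (hypothetical `agent` type, a pthread mutex standing in for the kernel spinlock; compile with -lpthread):

#include <pthread.h>
#include <stdio.h>

struct agent {
	int refcount;
	int ready;              /* stands in for the lists/locks set up first */
	struct agent *next;
};

static struct agent *agent_list;
static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;

static void register_agent(struct agent *a)
{
	/* 1. Fully initialize while the object is still private ... */
	a->refcount = 1;
	a->ready = 1;

	/* 2. ... and only then publish it where others can find it. */
	pthread_mutex_lock(&reg_lock);
	a->next = agent_list;
	agent_list = a;
	pthread_mutex_unlock(&reg_lock);
}

int main(void)
{
	struct agent a = { 0 };
	register_agent(&a);
	printf("published ready=%d refcount=%d\n", a.ready, a.refcount);
	return 0;
}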

+ 1 - 1
drivers/infiniband/core/mad_rmpp.c

@@ -735,7 +735,7 @@ process_rmpp_data(struct ib_mad_agent_private *agent,
 		goto bad;
 	}

-	if (rmpp_hdr->seg_num == __constant_htonl(1)) {
+	if (rmpp_hdr->seg_num == cpu_to_be32(1)) {
 		if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
 			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
 			goto bad;

+ 2 - 0
drivers/infiniband/core/sa_query.c

@@ -395,6 +395,8 @@ static void update_sm_ah(struct work_struct *work)
 	}

 	spin_lock_irq(&port->ah_lock);
+	if (port->sm_ah)
+		kref_put(&port->sm_ah->ref, free_sm_ah);
 	port->sm_ah = new_ah;
 	spin_unlock_irq(&port->ah_lock);
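
Note: the sa_query.c hunk closes what looks like a reference leak — when a new SM address handle is cached, the previous one must be released or its refcount never reaches zero. A toy counter-based sketch of the replace-and-put idiom (simplified kref-style counting, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct sm_ah {
	int ref;
};

static void ah_put(struct sm_ah *ah)
{
	if (ah && --ah->ref == 0) {
		printf("freeing old ah\n");
		free(ah);
	}
}

static struct sm_ah *cached_ah;

static void cache_new_ah(struct sm_ah *new_ah)
{
	ah_put(cached_ah);       /* the added kref_put(): drop the old entry */
	cached_ah = new_ah;      /* then install the new one */
}

int main(void)
{
	struct sm_ah *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));
	a->ref = b->ref = 1;
	cache_new_ah(a);
	cache_new_ah(b);         /* without ah_put(), 'a' would leak here */
	ah_put(cached_ah);
	return 0;
}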
 
 

+ 3 - 16
drivers/infiniband/core/sysfs.c

@@ -66,11 +66,6 @@ struct port_table_attribute {
 	int			index;
 };

-static inline int ibdev_is_alive(const struct ib_device *dev)
-{
-	return dev->reg_state == IB_DEV_REGISTERED;
-}
-
 static ssize_t port_attr_show(struct kobject *kobj,
 			      struct attribute *attr, char *buf)
 {
@@ -80,8 +75,6 @@ static ssize_t port_attr_show(struct kobject *kobj,

 	if (!port_attr->show)
 		return -EIO;
-	if (!ibdev_is_alive(p->ibdev))
-		return -ENODEV;

 	return port_attr->show(p, port_attr, buf);
 }
@@ -562,9 +555,6 @@ static ssize_t show_node_type(struct device *device,
 {
 	struct ib_device *dev = container_of(device, struct ib_device, dev);

-	if (!ibdev_is_alive(dev))
-		return -ENODEV;
-
 	switch (dev->node_type) {
 	case RDMA_NODE_IB_CA:	  return sprintf(buf, "%d: CA\n", dev->node_type);
 	case RDMA_NODE_RNIC:	  return sprintf(buf, "%d: RNIC\n", dev->node_type);
@@ -581,9 +571,6 @@ static ssize_t show_sys_image_guid(struct device *device,
 	struct ib_device_attr attr;
 	ssize_t ret;

-	if (!ibdev_is_alive(dev))
-		return -ENODEV;
-
 	ret = ib_query_device(dev, &attr);
 	if (ret)
 		return ret;
@@ -600,9 +587,6 @@ static ssize_t show_node_guid(struct device *device,
 {
 	struct ib_device *dev = container_of(device, struct ib_device, dev);

-	if (!ibdev_is_alive(dev))
-		return -ENODEV;
-
 	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
 		       be16_to_cpu(((__be16 *) &dev->node_guid)[0]),
 		       be16_to_cpu(((__be16 *) &dev->node_guid)[1]),
@@ -848,6 +832,9 @@ void ib_device_unregister_sysfs(struct ib_device *device)
 	struct kobject *p, *t;
 	struct ib_port *port;

+	/* Hold kobject until ib_dealloc_device() */
+	kobject_get(&device->dev.kobj);
+
 	list_for_each_entry_safe(p, t, &device->port_list, entry) {
 		list_del(&p->entry);
 		port = container_of(p, struct ib_port, kobj);
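
Note: taken together, the device.c and sysfs.c hunks fix a lifetime problem — the ib_device is embedded in a sysfs kobject, so tearing sysfs down inside ib_unregister_device() could free the structure while callers still hold it. The fix takes an extra reference in ib_device_unregister_sysfs() and drops it only in ib_dealloc_device(); the ibdev_is_alive() checks become unnecessary because attributes can no longer outlive the device. A stripped-down sketch of the pin-across-teardown idea (plain counter, not the kobject API):

#include <stdio.h>
#include <stdlib.h>

struct dev {
	int ref;
};

static void dev_put(struct dev *d)
{
	if (--d->ref == 0) {
		printf("final free\n");
		free(d);
	}
}

static void unregister_sysfs(struct dev *d)
{
	d->ref++;            /* pin: keep the memory past sysfs teardown */
	/* ... remove attributes, then drop sysfs' own reference ... */
	dev_put(d);
}

static void dealloc_device(struct dev *d)
{
	dev_put(d);          /* drops the pin taken in unregister_sysfs() */
}

int main(void)
{
	struct dev *d = malloc(sizeof(*d));
	d->ref = 1;          /* sysfs' reference */
	unregister_sysfs(d);
	dealloc_device(d);   /* memory stays valid until this point */
	return 0;
}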

+ 2 - 2
drivers/infiniband/hw/cxgb3/iwch_qp.c

@@ -99,8 +99,8 @@ static int build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
 	if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
 		plen = 4;
 		wqe->write.sgl[0].stag = wr->ex.imm_data;
-		wqe->write.sgl[0].len = __constant_cpu_to_be32(0);
-		wqe->write.num_sgle = __constant_cpu_to_be32(0);
+		wqe->write.sgl[0].len = cpu_to_be32(0);
+		wqe->write.num_sgle = cpu_to_be32(0);
 		*flit_cnt = 6;
 	} else {
 		plen = 0;

+ 4 - 4
drivers/infiniband/hw/ehca/ehca_sqp.c

@@ -46,11 +46,11 @@
 #include "ehca_iverbs.h"
 #include "hcp_if.h"

-#define IB_MAD_STATUS_REDIRECT		__constant_htons(0x0002)
-#define IB_MAD_STATUS_UNSUP_VERSION	__constant_htons(0x0004)
-#define IB_MAD_STATUS_UNSUP_METHOD	__constant_htons(0x0008)
+#define IB_MAD_STATUS_REDIRECT		cpu_to_be16(0x0002)
+#define IB_MAD_STATUS_UNSUP_VERSION	cpu_to_be16(0x0004)
+#define IB_MAD_STATUS_UNSUP_METHOD	cpu_to_be16(0x0008)

-#define IB_PMA_CLASS_PORT_INFO		__constant_htons(0x0001)
+#define IB_PMA_CLASS_PORT_INFO		cpu_to_be16(0x0001)

 /**
  * ehca_define_sqp - Defines special queue pair 1 (GSI QP). When special queue

+ 2 - 2
drivers/infiniband/hw/ipath/ipath_eeprom.c

@@ -772,8 +772,8 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd)
 			 "0x%x, not 0x%x\n", csum, ifp->if_csum);
 		goto done;
 	}
-	if (*(__be64 *) ifp->if_guid == 0ULL ||
-	    *(__be64 *) ifp->if_guid == __constant_cpu_to_be64(-1LL)) {
+	if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) ||
+	    *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) {
 		ipath_dev_err(dd, "Invalid GUID %llx from flash; "
 			      "ignoring\n",
 			      *(unsigned long long *) ifp->if_guid);

+ 1 - 1
drivers/infiniband/hw/ipath/ipath_init_chip.c

@@ -455,7 +455,7 @@ static void init_shadow_tids(struct ipath_devdata *dd)
 	if (!addrs) {
 		ipath_dev_err(dd, "failed to allocate shadow dma handle "
 			      "array, no expected sends!\n");
-		vfree(dd->ipath_pageshadow);
+		vfree(pages);
 		dd->ipath_pageshadow = NULL;
 		return;
 	}

+ 47 - 48
drivers/infiniband/hw/ipath/ipath_mad.c

@@ -37,10 +37,10 @@
 #include "ipath_verbs.h"
 #include "ipath_common.h"

-#define IB_SMP_UNSUP_VERSION	__constant_htons(0x0004)
-#define IB_SMP_UNSUP_METHOD	__constant_htons(0x0008)
-#define IB_SMP_UNSUP_METH_ATTR	__constant_htons(0x000C)
-#define IB_SMP_INVALID_FIELD	__constant_htons(0x001C)
+#define IB_SMP_UNSUP_VERSION	cpu_to_be16(0x0004)
+#define IB_SMP_UNSUP_METHOD	cpu_to_be16(0x0008)
+#define IB_SMP_UNSUP_METH_ATTR	cpu_to_be16(0x000C)
+#define IB_SMP_INVALID_FIELD	cpu_to_be16(0x001C)

 static int reply(struct ib_smp *smp)
 {
@@ -789,12 +789,12 @@ static int recv_subn_set_pkeytable(struct ib_smp *smp,
 	return recv_subn_get_pkeytable(smp, ibdev);
 }

-#define IB_PMA_CLASS_PORT_INFO		__constant_htons(0x0001)
-#define IB_PMA_PORT_SAMPLES_CONTROL	__constant_htons(0x0010)
-#define IB_PMA_PORT_SAMPLES_RESULT	__constant_htons(0x0011)
-#define IB_PMA_PORT_COUNTERS		__constant_htons(0x0012)
-#define IB_PMA_PORT_COUNTERS_EXT	__constant_htons(0x001D)
-#define IB_PMA_PORT_SAMPLES_RESULT_EXT	__constant_htons(0x001E)
+#define IB_PMA_CLASS_PORT_INFO		cpu_to_be16(0x0001)
+#define IB_PMA_PORT_SAMPLES_CONTROL	cpu_to_be16(0x0010)
+#define IB_PMA_PORT_SAMPLES_RESULT	cpu_to_be16(0x0011)
+#define IB_PMA_PORT_COUNTERS		cpu_to_be16(0x0012)
+#define IB_PMA_PORT_COUNTERS_EXT	cpu_to_be16(0x001D)
+#define IB_PMA_PORT_SAMPLES_RESULT_EXT	cpu_to_be16(0x001E)

 struct ib_perf {
 	u8 base_version;
@@ -884,19 +884,19 @@ struct ib_pma_portcounters {
 	__be32 port_rcv_packets;
 } __attribute__ ((packed));

-#define IB_PMA_SEL_SYMBOL_ERROR			__constant_htons(0x0001)
-#define IB_PMA_SEL_LINK_ERROR_RECOVERY		__constant_htons(0x0002)
-#define IB_PMA_SEL_LINK_DOWNED			__constant_htons(0x0004)
-#define IB_PMA_SEL_PORT_RCV_ERRORS		__constant_htons(0x0008)
-#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS	__constant_htons(0x0010)
-#define IB_PMA_SEL_PORT_XMIT_DISCARDS		__constant_htons(0x0040)
-#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS	__constant_htons(0x0200)
-#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS	__constant_htons(0x0400)
-#define IB_PMA_SEL_PORT_VL15_DROPPED		__constant_htons(0x0800)
-#define IB_PMA_SEL_PORT_XMIT_DATA		__constant_htons(0x1000)
-#define IB_PMA_SEL_PORT_RCV_DATA		__constant_htons(0x2000)
-#define IB_PMA_SEL_PORT_XMIT_PACKETS		__constant_htons(0x4000)
-#define IB_PMA_SEL_PORT_RCV_PACKETS		__constant_htons(0x8000)
+#define IB_PMA_SEL_SYMBOL_ERROR			cpu_to_be16(0x0001)
+#define IB_PMA_SEL_LINK_ERROR_RECOVERY		cpu_to_be16(0x0002)
+#define IB_PMA_SEL_LINK_DOWNED			cpu_to_be16(0x0004)
+#define IB_PMA_SEL_PORT_RCV_ERRORS		cpu_to_be16(0x0008)
+#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS	cpu_to_be16(0x0010)
+#define IB_PMA_SEL_PORT_XMIT_DISCARDS		cpu_to_be16(0x0040)
+#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS	cpu_to_be16(0x0200)
+#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS	cpu_to_be16(0x0400)
+#define IB_PMA_SEL_PORT_VL15_DROPPED		cpu_to_be16(0x0800)
+#define IB_PMA_SEL_PORT_XMIT_DATA		cpu_to_be16(0x1000)
+#define IB_PMA_SEL_PORT_RCV_DATA		cpu_to_be16(0x2000)
+#define IB_PMA_SEL_PORT_XMIT_PACKETS		cpu_to_be16(0x4000)
+#define IB_PMA_SEL_PORT_RCV_PACKETS		cpu_to_be16(0x8000)

 struct ib_pma_portcounters_ext {
 	u8 reserved;
@@ -913,14 +913,14 @@ struct ib_pma_portcounters_ext {
 	__be64 port_multicast_rcv_packets;
 } __attribute__ ((packed));

-#define IB_PMA_SELX_PORT_XMIT_DATA		__constant_htons(0x0001)
-#define IB_PMA_SELX_PORT_RCV_DATA		__constant_htons(0x0002)
-#define IB_PMA_SELX_PORT_XMIT_PACKETS		__constant_htons(0x0004)
-#define IB_PMA_SELX_PORT_RCV_PACKETS		__constant_htons(0x0008)
-#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS	__constant_htons(0x0010)
-#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS	__constant_htons(0x0020)
-#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS	__constant_htons(0x0040)
-#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS	__constant_htons(0x0080)
+#define IB_PMA_SELX_PORT_XMIT_DATA		cpu_to_be16(0x0001)
+#define IB_PMA_SELX_PORT_RCV_DATA		cpu_to_be16(0x0002)
+#define IB_PMA_SELX_PORT_XMIT_PACKETS		cpu_to_be16(0x0004)
+#define IB_PMA_SELX_PORT_RCV_PACKETS		cpu_to_be16(0x0008)
+#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS	cpu_to_be16(0x0010)
+#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS	cpu_to_be16(0x0020)
+#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS	cpu_to_be16(0x0040)
+#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS	cpu_to_be16(0x0080)

 static int recv_pma_get_classportinfo(struct ib_perf *pmp)
 {
@@ -933,7 +933,7 @@ static int recv_pma_get_classportinfo(struct ib_perf *pmp)
 		pmp->status |= IB_SMP_INVALID_FIELD;

 	/* Indicate AllPortSelect is valid (only one port anyway) */
-	p->cap_mask = __constant_cpu_to_be16(1 << 8);
+	p->cap_mask = cpu_to_be16(1 << 8);
 	p->base_version = 1;
 	p->class_version = 1;
 	/*
@@ -951,12 +951,11 @@
  * We support 5 counters which only count the mandatory quantities.
  */
 #define COUNTER_MASK(q, n) (q << ((9 - n) * 3))
-#define COUNTER_MASK0_9 \
-	__constant_cpu_to_be32(COUNTER_MASK(1, 0) | \
-			       COUNTER_MASK(1, 1) | \
-			       COUNTER_MASK(1, 2) | \
-			       COUNTER_MASK(1, 3) | \
-			       COUNTER_MASK(1, 4))
+#define COUNTER_MASK0_9 cpu_to_be32(COUNTER_MASK(1, 0) | \
+				    COUNTER_MASK(1, 1) | \
+				    COUNTER_MASK(1, 2) | \
+				    COUNTER_MASK(1, 3) | \
+				    COUNTER_MASK(1, 4))

 static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
 					   struct ib_device *ibdev, u8 port)
@@ -1137,7 +1136,7 @@ static int recv_pma_get_portsamplesresult_ext(struct ib_perf *pmp,
 		status = dev->pma_sample_status;
 	p->sample_status = cpu_to_be16(status);
 	/* 64 bits */
-	p->extended_width = __constant_cpu_to_be32(0x80000000);
+	p->extended_width = cpu_to_be32(0x80000000);
 	for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
 		p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
 		    cpu_to_be64(
@@ -1185,7 +1184,7 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
 		pmp->status |= IB_SMP_INVALID_FIELD;

 	if (cntrs.symbol_error_counter > 0xFFFFUL)
-		p->symbol_error_counter = __constant_cpu_to_be16(0xFFFF);
+		p->symbol_error_counter = cpu_to_be16(0xFFFF);
 	else
 		p->symbol_error_counter =
 			cpu_to_be16((u16)cntrs.symbol_error_counter);
@@ -1199,17 +1198,17 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
 	else
 		p->link_downed_counter = (u8)cntrs.link_downed_counter;
 	if (cntrs.port_rcv_errors > 0xFFFFUL)
-		p->port_rcv_errors = __constant_cpu_to_be16(0xFFFF);
+		p->port_rcv_errors = cpu_to_be16(0xFFFF);
 	else
 		p->port_rcv_errors =
 			cpu_to_be16((u16) cntrs.port_rcv_errors);
 	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
-		p->port_rcv_remphys_errors = __constant_cpu_to_be16(0xFFFF);
+		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
 	else
 		p->port_rcv_remphys_errors =
 			cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
 	if (cntrs.port_xmit_discards > 0xFFFFUL)
-		p->port_xmit_discards = __constant_cpu_to_be16(0xFFFF);
+		p->port_xmit_discards = cpu_to_be16(0xFFFF);
 	else
 		p->port_xmit_discards =
 			cpu_to_be16((u16)cntrs.port_xmit_discards);
@@ -1220,24 +1219,24 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
 	p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
 		cntrs.excessive_buffer_overrun_errors;
 	if (cntrs.vl15_dropped > 0xFFFFUL)
-		p->vl15_dropped = __constant_cpu_to_be16(0xFFFF);
+		p->vl15_dropped = cpu_to_be16(0xFFFF);
 	else
 		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
 	if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
-		p->port_xmit_data = __constant_cpu_to_be32(0xFFFFFFFF);
+		p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
 	else
 		p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
 	if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
-		p->port_rcv_data = __constant_cpu_to_be32(0xFFFFFFFF);
+		p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
 	else
 		p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
 	if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
-		p->port_xmit_packets = __constant_cpu_to_be32(0xFFFFFFFF);
+		p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
 	else
 		p->port_xmit_packets =
 			cpu_to_be32((u32)cntrs.port_xmit_packets);
 	if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
-		p->port_rcv_packets = __constant_cpu_to_be32(0xFFFFFFFF);
+		p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
 	else
 		p->port_rcv_packets =
 			cpu_to_be32((u32) cntrs.port_rcv_packets);
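
Note: the portcounters hunks repeat one idiom — the hardware keeps 64-bit counts, but the PMA attribute fields are 16 or 32 bits, so values are clamped to all-ones before the cpu_to_be* conversion rather than silently truncated. A small sketch of the saturate-then-convert step (userspace htobe16/htobe32 from <endian.h> standing in for the kernel helpers):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t sat_be16(uint64_t v)
{
	/* Saturate to the field width, then convert to big endian. */
	return htobe16(v > 0xFFFFUL ? 0xFFFF : (uint16_t)v);
}

static uint32_t sat_be32(uint64_t v)
{
	return htobe32(v > 0xFFFFFFFFUL ? 0xFFFFFFFF : (uint32_t)v);
}

int main(void)
{
	/* A count that overflows 16 bits saturates instead of wrapping. */
	printf("0x%04x\n", be16toh(sat_be16(70000)));   /* 0xffff */
	printf("0x%08x\n", be32toh(sat_be32(123)));     /* 0x0000007b */
	return 0;
}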

+ 1 - 1
drivers/infiniband/hw/ipath/ipath_rc.c

@@ -1744,7 +1744,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 		/* Signal completion event if the solicited bit is set. */
 		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
 			       (ohdr->bth[0] &
-				__constant_cpu_to_be32(1 << 23)) != 0);
+				cpu_to_be32(1 << 23)) != 0);
 		break;

 	case OP(RDMA_WRITE_FIRST):

+ 2 - 2
drivers/infiniband/hw/ipath/ipath_sdma.c

@@ -781,10 +781,10 @@ retry:
 		descqp = &dd->ipath_sdma_descq[dd->ipath_sdma_descq_cnt].qw[0];
 	descqp -= 2;
 	/* SDmaLastDesc */
-	descqp[0] |= __constant_cpu_to_le64(1ULL << 11);
+	descqp[0] |= cpu_to_le64(1ULL << 11);
 	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_INTREQ) {
 		/* SDmaIntReq */
-		descqp[0] |= __constant_cpu_to_le64(1ULL << 15);
+		descqp[0] |= cpu_to_le64(1ULL << 15);
 	}

 	/* Commit writes to memory and advance the tail on the chip */

+ 1 - 1
drivers/infiniband/hw/ipath/ipath_uc.c

@@ -419,7 +419,7 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 		/* Signal completion event if the solicited bit is set. */
 		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
 			       (ohdr->bth[0] &
-				__constant_cpu_to_be32(1 << 23)) != 0);
+				cpu_to_be32(1 << 23)) != 0);
 		break;

 	case OP(RDMA_WRITE_FIRST):

+ 2 - 2
drivers/infiniband/hw/ipath/ipath_ud.c

@@ -370,7 +370,7 @@ int ipath_make_ud_req(struct ipath_qp *qp)
 	 */
 	ohdr->bth[1] = ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
 		ah_attr->dlid != IPATH_PERMISSIVE_LID ?
-		__constant_cpu_to_be32(IPATH_MULTICAST_QPN) :
+		cpu_to_be32(IPATH_MULTICAST_QPN) :
 		cpu_to_be32(wqe->wr.wr.ud.remote_qpn);
 	ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPATH_PSN_MASK);
 	/*
@@ -573,7 +573,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 	/* Signal completion event if the solicited bit is set. */
 	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
 		       (ohdr->bth[0] &
-			__constant_cpu_to_be32(1 << 23)) != 0);
+			cpu_to_be32(1 << 23)) != 0);

 bail:;
 }

+ 4 - 4
drivers/infiniband/hw/ipath/ipath_user_pages.c

@@ -209,20 +209,20 @@ void ipath_release_user_pages_on_close(struct page **p, size_t num_pages)

 	mm = get_task_mm(current);
 	if (!mm)
-		goto bail;
+		return;

 	work = kmalloc(sizeof(*work), GFP_KERNEL);
 	if (!work)
 		goto bail_mm;

-	goto bail;
-
 	INIT_WORK(&work->work, user_pages_account);
 	work->mm = mm;
 	work->num_pages = num_pages;

+	schedule_work(&work->work);
+	return;
+
 bail_mm:
 	mmput(mm);
-bail:
 	return;
 }
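
Note: this fix removes a stray "goto bail;" that sat between allocating the work item and the code that fills it in, so the function leaked the allocation and the deferred mm accounting never ran; control now falls through to schedule_work(). A reduced sketch of the corrected flow (a direct call models the deferred work; names are illustrative, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct work {
	void (*fn)(struct work *);
	long num_pages;
};

static void pages_account(struct work *w)
{
	printf("accounting %ld pages\n", w->num_pages);
	free(w);
}

static void schedule_work(struct work *w)   /* stand-in: runs inline */
{
	w->fn(w);
}

static void release_pages_on_close(long num_pages)
{
	struct work *w = malloc(sizeof(*w));
	if (!w)
		return;
	w->fn = pages_account;
	w->num_pages = num_pages;
	/* The removed 'goto bail;' used to skip this call entirely. */
	schedule_work(w);
}

int main(void)
{
	release_pages_on_close(4);
	return 0;
}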

+ 3 - 3
drivers/infiniband/hw/ipath/ipath_user_sdma.c

@@ -667,13 +667,13 @@ static inline __le64 ipath_sdma_make_desc0(struct ipath_devdata *dd,

 static inline __le64 ipath_sdma_make_first_desc0(__le64 descq)
 {
-	return descq | __constant_cpu_to_le64(1ULL << 12);
+	return descq | cpu_to_le64(1ULL << 12);
 }

 static inline __le64 ipath_sdma_make_last_desc0(__le64 descq)
 {
 					      /* last */  /* dma head */
-	return descq | __constant_cpu_to_le64(1ULL << 11 | 1ULL << 13);
+	return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
 }

 static inline __le64 ipath_sdma_make_desc1(u64 addr)
@@ -763,7 +763,7 @@ static int ipath_user_sdma_push_pkts(struct ipath_devdata *dd,
 		if (ofs >= IPATH_SMALLBUF_DWORDS) {
 			for (i = 0; i < pkt->naddr; i++) {
 				dd->ipath_sdma_descq[dtail].qw[0] |=
-					__constant_cpu_to_le64(1ULL << 14);
+					cpu_to_le64(1ULL << 14);
 				if (++dtail == dd->ipath_sdma_descq_cnt)
 					dtail = 0;
 			}
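
Note: the SDMA hunks swap __constant_cpu_to_le64 for plain cpu_to_le64 when OR-ing flag bits into descriptor words; since every flag is converted into the same little-endian domain, ORs compose without any byte-order round trips. A brief sketch (glibc htole64 as the helper; the bit positions are the ones visible in the diff):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define cpu_to_le64(x) htole64(x)   /* stand-in for the kernel helper */

/* Flag bits from the diff: last=11, first=12, dma head=13, intreq=15. */
int main(void)
{
	uint64_t desc = cpu_to_le64(0x1000);          /* payload word */

	desc |= cpu_to_le64(1ULL << 11);              /* SDmaLastDesc */
	desc |= cpu_to_le64(1ULL << 11 | 1ULL << 13); /* last + dma head */

	printf("descriptor: 0x%016llx\n",
	       (unsigned long long)le64toh(desc));
	return 0;
}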

+ 1 - 1
drivers/infiniband/hw/ipath/ipath_verbs.c

@@ -1585,7 +1585,7 @@ static int ipath_query_port(struct ib_device *ibdev,
 	u64 ibcstat;

 	memset(props, 0, sizeof(*props));
-	props->lid = lid ? lid : __constant_be16_to_cpu(IB_LID_PERMISSIVE);
+	props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
 	props->lmc = dd->ipath_lmc;
 	props->sm_lid = dev->sm_lid;
 	props->sm_sl = dev->sm_sl;

+ 5 - 5
drivers/infiniband/hw/ipath/ipath_verbs.h

@@ -86,11 +86,11 @@
 #define IB_PMA_SAMPLE_STATUS_RUNNING	0x02

 /* Mandatory IB performance counter select values. */
-#define IB_PMA_PORT_XMIT_DATA	__constant_htons(0x0001)
-#define IB_PMA_PORT_RCV_DATA	__constant_htons(0x0002)
-#define IB_PMA_PORT_XMIT_PKTS	__constant_htons(0x0003)
-#define IB_PMA_PORT_RCV_PKTS	__constant_htons(0x0004)
-#define IB_PMA_PORT_XMIT_WAIT	__constant_htons(0x0005)
+#define IB_PMA_PORT_XMIT_DATA	cpu_to_be16(0x0001)
+#define IB_PMA_PORT_RCV_DATA	cpu_to_be16(0x0002)
+#define IB_PMA_PORT_XMIT_PKTS	cpu_to_be16(0x0003)
+#define IB_PMA_PORT_RCV_PKTS	cpu_to_be16(0x0004)
+#define IB_PMA_PORT_XMIT_WAIT	cpu_to_be16(0x0005)

 struct ib_reth {
 	__be64 vaddr;

+ 20 - 7
drivers/infiniband/hw/mlx4/mad.c

@@ -147,7 +147,8 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
 * Snoop SM MADs for port info and P_Key table sets, so we can
 * synthesize LID change and P_Key change events.
 */
-static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad)
+static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
+				u16 prev_lid)
 {
 	struct ib_event event;

@@ -157,6 +158,7 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad)
 		if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
 			struct ib_port_info *pinfo =
 				(struct ib_port_info *) ((struct ib_smp *) mad)->data;
+			u16 lid = be16_to_cpu(pinfo->lid);

 			update_sm_ah(to_mdev(ibdev), port_num,
 				     be16_to_cpu(pinfo->sm_lid),
@@ -165,12 +167,15 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad)
 			event.device	       = ibdev;
 			event.element.port_num = port_num;

-			if (pinfo->clientrereg_resv_subnetto & 0x80)
+			if (pinfo->clientrereg_resv_subnetto & 0x80) {
 				event.event    = IB_EVENT_CLIENT_REREGISTER;
-			else
-				event.event    = IB_EVENT_LID_CHANGE;
+				ib_dispatch_event(&event);
+			}

-			ib_dispatch_event(&event);
+			if (prev_lid != lid) {
+				event.event    = IB_EVENT_LID_CHANGE;
+				ib_dispatch_event(&event);
+			}
 		}

 		if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {
@@ -228,8 +233,9 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags,	u8 port_num,
 			struct ib_wc *in_wc, struct ib_grh *in_grh,
 			struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
-	u16 slid;
+	u16 slid, prev_lid = 0;
 	int err;
+	struct ib_port_attr pattr;

 	slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

@@ -263,6 +269,13 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags,	u8 port_num,
 	} else
 		return IB_MAD_RESULT_SUCCESS;

+	if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+	     in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
+	    in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
+	    in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
+	    !ib_query_port(ibdev, port_num, &pattr))
+		prev_lid = pattr.lid;
+
 	err = mlx4_MAD_IFC(to_mdev(ibdev),
 			   mad_flags & IB_MAD_IGNORE_MKEY,
 			   mad_flags & IB_MAD_IGNORE_BKEY,
@@ -271,7 +284,7 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags,	u8 port_num,
 		return IB_MAD_RESULT_FAILURE;

 	if (!out_mad->mad_hdr.status) {
-		smp_snoop(ibdev, port_num, in_mad);
+		smp_snoop(ibdev, port_num, in_mad, prev_lid);
 		node_desc_override(ibdev, out_mad);
 	}
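
Note: this mlx4 change (mirrored in mthca_mad.c below) decouples the two port events — client-reregister is dispatched whenever its bit is set, while IB_EVENT_LID_CHANGE now fires only if the LID actually differs from the value queried before the PortInfo SET was executed, instead of being the implicit else-branch. A compact sketch of the decision logic:

#include <stdio.h>

enum ib_event { EV_CLIENT_REREGISTER, EV_LID_CHANGE };

static void dispatch(enum ib_event ev)
{
	printf("dispatch %s\n",
	       ev == EV_CLIENT_REREGISTER ? "CLIENT_REREGISTER" : "LID_CHANGE");
}

/* clientrereg: bit 0x80 of clientrereg_resv_subnetto in the MAD. */
static void port_info_events(unsigned clientrereg, int prev_lid, int lid)
{
	if (clientrereg & 0x80)
		dispatch(EV_CLIENT_REREGISTER);   /* independent of the LID */
	if (prev_lid != lid)
		dispatch(EV_LID_CHANGE);          /* only on a real change */
}

int main(void)
{
	port_info_events(0x80, 5, 5);   /* reregister only, no LID event */
	port_info_events(0x00, 5, 7);   /* LID event only */
	return 0;
}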
 
 

+ 3 - 2
drivers/infiniband/hw/mlx4/main.c

@@ -699,11 +699,12 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 	struct mlx4_ib_dev *ibdev = ibdev_ptr;
 	int p;

+	mlx4_ib_mad_cleanup(ibdev);
+	ib_unregister_device(&ibdev->ib_dev);
+
 	for (p = 1; p <= ibdev->num_ports; ++p)
 		mlx4_CLOSE_PORT(dev, p);

-	mlx4_ib_mad_cleanup(ibdev);
-	ib_unregister_device(&ibdev->ib_dev);
 	iounmap(ibdev->uar_map);
 	mlx4_uar_free(dev, &ibdev->priv_uar);
 	mlx4_pd_free(dev, ibdev->priv_pdn);

+ 11 - 11
drivers/infiniband/hw/mlx4/qp.c

@@ -71,17 +71,17 @@ enum {
 };

 static const __be32 mlx4_ib_opcode[] = {
-	[IB_WR_SEND]			= __constant_cpu_to_be32(MLX4_OPCODE_SEND),
-	[IB_WR_LSO]			= __constant_cpu_to_be32(MLX4_OPCODE_LSO),
-	[IB_WR_SEND_WITH_IMM]		= __constant_cpu_to_be32(MLX4_OPCODE_SEND_IMM),
-	[IB_WR_RDMA_WRITE]		= __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
-	[IB_WR_RDMA_WRITE_WITH_IMM]	= __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
-	[IB_WR_RDMA_READ]		= __constant_cpu_to_be32(MLX4_OPCODE_RDMA_READ),
-	[IB_WR_ATOMIC_CMP_AND_SWP]	= __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
-	[IB_WR_ATOMIC_FETCH_AND_ADD]	= __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
-	[IB_WR_SEND_WITH_INV]		= __constant_cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
-	[IB_WR_LOCAL_INV]		= __constant_cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
-	[IB_WR_FAST_REG_MR]		= __constant_cpu_to_be32(MLX4_OPCODE_FMR),
+	[IB_WR_SEND]			= cpu_to_be32(MLX4_OPCODE_SEND),
+	[IB_WR_LSO]			= cpu_to_be32(MLX4_OPCODE_LSO),
+	[IB_WR_SEND_WITH_IMM]		= cpu_to_be32(MLX4_OPCODE_SEND_IMM),
+	[IB_WR_RDMA_WRITE]		= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
+	[IB_WR_RDMA_WRITE_WITH_IMM]	= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
+	[IB_WR_RDMA_READ]		= cpu_to_be32(MLX4_OPCODE_RDMA_READ),
+	[IB_WR_ATOMIC_CMP_AND_SWP]	= cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
+	[IB_WR_ATOMIC_FETCH_AND_ADD]	= cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
+	[IB_WR_SEND_WITH_INV]		= cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
+	[IB_WR_LOCAL_INV]		= cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
+	[IB_WR_FAST_REG_MR]		= cpu_to_be32(MLX4_OPCODE_FMR),
 };

 static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)

+ 19 - 6
drivers/infiniband/hw/mthca/mthca_mad.c

@@ -104,7 +104,8 @@ static void update_sm_ah(struct mthca_dev *dev,
 */
 static void smp_snoop(struct ib_device *ibdev,
 		      u8 port_num,
-		      struct ib_mad *mad)
+		      struct ib_mad *mad,
+		      u16 prev_lid)
 {
 	struct ib_event event;

@@ -114,6 +115,7 @@ static void smp_snoop(struct ib_device *ibdev,
 		if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
 			struct ib_port_info *pinfo =
 				(struct ib_port_info *) ((struct ib_smp *) mad)->data;
+			u16 lid = be16_to_cpu(pinfo->lid);

 			mthca_update_rate(to_mdev(ibdev), port_num);
 			update_sm_ah(to_mdev(ibdev), port_num,
@@ -123,12 +125,15 @@ static void smp_snoop(struct ib_device *ibdev,
 			event.device           = ibdev;
 			event.element.port_num = port_num;

-			if (pinfo->clientrereg_resv_subnetto & 0x80)
+			if (pinfo->clientrereg_resv_subnetto & 0x80) {
 				event.event    = IB_EVENT_CLIENT_REREGISTER;
-			else
-				event.event    = IB_EVENT_LID_CHANGE;
+				ib_dispatch_event(&event);
+			}

-			ib_dispatch_event(&event);
+			if (prev_lid != lid) {
+				event.event    = IB_EVENT_LID_CHANGE;
+				ib_dispatch_event(&event);
+			}
 		}

 		if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {
@@ -196,6 +201,8 @@ int mthca_process_mad(struct ib_device *ibdev,
 	int err;
 	u8 status;
 	u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
+	u16 prev_lid = 0;
+	struct ib_port_attr pattr;

 	/* Forward locally generated traps to the SM */
 	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
@@ -233,6 +240,12 @@ int mthca_process_mad(struct ib_device *ibdev,
 			return IB_MAD_RESULT_SUCCESS;
 	} else
 		return IB_MAD_RESULT_SUCCESS;
+	if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+	     in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
+	    in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
+	    in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
+	    !ib_query_port(ibdev, port_num, &pattr))
+		prev_lid = pattr.lid;

 	err = mthca_MAD_IFC(to_mdev(ibdev),
 			    mad_flags & IB_MAD_IGNORE_MKEY,
@@ -252,7 +265,7 @@ int mthca_process_mad(struct ib_device *ibdev,
 	}

 	if (!out_mad->mad_hdr.status) {
-		smp_snoop(ibdev, port_num, in_mad);
+		smp_snoop(ibdev, port_num, in_mad, prev_lid);
 		node_desc_override(ibdev, out_mad);
 	}
 
 

+ 1 - 1
drivers/infiniband/hw/nes/nes.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc.  All rights reserved.
  * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two

+ 1 - 1
drivers/infiniband/hw/nes/nes.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc.  All rights reserved.
  * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two

File diff suppressed because it is too large
+ 385 - 193
drivers/infiniband/hw/nes/nes_cm.c


+ 8 - 4
drivers/infiniband/hw/nes/nes_cm.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
@@ -39,6 +39,9 @@
 #define NES_MANAGE_APBVT_DEL 0
 #define NES_MANAGE_APBVT_ADD 1

+#define NES_MPA_REQUEST_ACCEPT  1
+#define NES_MPA_REQUEST_REJECT  2
+
 /* IETF MPA -- defines, enums, structs */
 #define IEFT_MPA_KEY_REQ  "MPA ID Req Frame"
 #define IEFT_MPA_KEY_REP  "MPA ID Rep Frame"
@@ -186,6 +189,7 @@ enum nes_cm_node_state {
 	NES_CM_STATE_ACCEPTING,
 	NES_CM_STATE_MPAREQ_SENT,
 	NES_CM_STATE_MPAREQ_RCVD,
+	NES_CM_STATE_MPAREJ_RCVD,
 	NES_CM_STATE_TSA,
 	NES_CM_STATE_FIN_WAIT1,
 	NES_CM_STATE_FIN_WAIT2,
@@ -278,13 +282,12 @@ struct nes_cm_node {
 	struct nes_timer_entry	*send_entry;

 	spinlock_t                retrans_list_lock;
-	struct list_head          recv_list;
-	spinlock_t                recv_list_lock;
+	struct nes_timer_entry  *recv_entry;

 	int                       send_write0;
 	union {
 		struct ietf_mpa_frame mpa_frame;
-		u8                    mpa_frame_buf[NES_CM_DEFAULT_MTU];
+		u8                    mpa_frame_buf[MAX_CM_BUFFER];
 	};
 	u16                       mpa_frame_size;
 	struct iw_cm_id           *cm_id;
@@ -326,6 +329,7 @@ enum  nes_cm_event_type {
 	NES_CM_EVENT_MPA_REQ,
 	NES_CM_EVENT_MPA_CONNECT,
 	NES_CM_EVENT_MPA_ACCEPT,
+	NES_CM_EVENT_MPA_REJECT,
 	NES_CM_EVENT_MPA_ESTABLISHED,
 	NES_CM_EVENT_CONNECTED,
 	NES_CM_EVENT_CLOSED,

+ 1 - 1
drivers/infiniband/hw/nes/nes_context.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU

+ 13 - 4
drivers/infiniband/hw/nes/nes_hw.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
@@ -254,6 +254,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
 	u32 adapter_size;
 	u32 arp_table_size;
 	u16 vendor_id;
+	u16 device_id;
 	u8  OneG_Mode;
 	u8  func_index;

@@ -356,6 +357,13 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
 		return NULL;
 	}

+	nesadapter->vendor_id = (((u32) nesadapter->mac_addr_high) << 8) |
+				(nesadapter->mac_addr_low >> 24);
+
+	pci_bus_read_config_word(nesdev->pcidev->bus, nesdev->pcidev->devfn,
+				 PCI_DEVICE_ID, &device_id);
+	nesadapter->vendor_part_id = device_id;
+
 	if (nes_init_serdes(nesdev, hw_rev, port_count, nesadapter,
 							OneG_Mode)) {
 		kfree(nesadapter);
@@ -1636,7 +1644,6 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
 	nesvnic->post_cqp_request = nes_post_cqp_request;
 	nesvnic->mcrq_mcast_filter = NULL;

-	spin_lock_init(&nesvnic->nic.sq_lock);
 	spin_lock_init(&nesvnic->nic.rq_lock);

 	/* setup the RQ */
@@ -2261,6 +2268,8 @@ static void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq)

 		if (++head >= aeq_size)
 			head = 0;
+
+		nes_write32(nesdev->regs + NES_AEQ_ALLOC, 1 << 16);
 	}
 	while (1);
 	aeq->aeq_head = head;
@@ -2622,9 +2631,9 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
 						} else
 							break;
 					}
-					if (skb)
-						dev_kfree_skb_any(skb);
 				}
+				if (skb)
+					dev_kfree_skb_any(skb);
 				nesnic->sq_tail++;
 				nesnic->sq_tail &= nesnic->sq_size-1;
 				if (sq_cqes > 128) {

+ 2 - 3
drivers/infiniband/hw/nes/nes_hw.h

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+* Copyright (c) 2006 - 2009 Intel-NE, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
@@ -61,6 +61,7 @@ enum pci_regs {
 	NES_CQ_ACK = 0x0034,
 	NES_WQE_ALLOC = 0x0040,
 	NES_CQE_ALLOC = 0x0044,
+	NES_AEQ_ALLOC = 0x0048
 };

 enum indexed_regs {
@@ -875,7 +876,6 @@ struct nes_hw_nic {
 	u8 replenishing_rq;
 	u8 reserved;

-	spinlock_t sq_lock;
 	spinlock_t rq_lock;
 };

@@ -1147,7 +1147,6 @@ struct nes_ib_device;
 struct nes_vnic {
 	struct nes_ib_device *nesibdev;
 	u64 sq_full;
-	u64 sq_locked;
 	u64 tso_requests;
 	u64 segmented_tso_requests;
 	u64 linearized_skbs;

+ 62 - 80
drivers/infiniband/hw/nes/nes_nic.c

@@ -1,5 +1,5 @@
 /*
 /*
- * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc.  All rights reserved.
  *
  *
  * This software is available to you under a choice of one of two
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -400,8 +400,7 @@ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
 	if (skb_headlen(skb) == skb->len) {
 		if (skb_headlen(skb) <= NES_FIRST_FRAG_SIZE) {
 			nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_2_1_IDX] = 0;
-			nesnic->tx_skb[nesnic->sq_head] = NULL;
-			dev_kfree_skb(skb);
+			nesnic->tx_skb[nesnic->sq_head] = skb;
 		}
 	} else {
 		/* Deal with Fragments */
@@ -453,7 +452,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	u32 wqe_count=1;
 	u32 send_rc;
 	struct iphdr *iph;
-	unsigned long flags;
 	__le16 *wqe_fragment_length;
 	u32 nr_frags;
 	u32 original_first_length;
@@ -480,13 +478,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	if (netif_queue_stopped(netdev))
 		return NETDEV_TX_BUSY;
 
-	local_irq_save(flags);
-	if (!spin_trylock(&nesnic->sq_lock)) {
-		local_irq_restore(flags);
-		nesvnic->sq_locked++;
-		return NETDEV_TX_LOCKED;
-	}
-
 	/* Check if SQ is full */
 	if ((((nesnic->sq_tail+(nesnic->sq_size*2))-nesnic->sq_head) & (nesnic->sq_size - 1)) == 1) {
 		if (!netif_queue_stopped(netdev)) {
@@ -498,7 +489,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 			}
 		}
 		nesvnic->sq_full++;
-		spin_unlock_irqrestore(&nesnic->sq_lock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -531,7 +521,6 @@ sq_no_longer_full:
 					}
 				}
 				nesvnic->sq_full++;
-				spin_unlock_irqrestore(&nesnic->sq_lock, flags);
 				nes_debug(NES_DBG_NIC_TX, "%s: HNIC SQ full- TSO request has too many frags!\n",
 						netdev->name);
 				return NETDEV_TX_BUSY;
@@ -656,17 +645,13 @@ tso_sq_no_longer_full:
 			skb_set_transport_header(skb, hoffset);
 			skb_set_network_header(skb, nhoffset);
 			send_rc = nes_nic_send(skb, netdev);
-			if (send_rc != NETDEV_TX_OK) {
-				spin_unlock_irqrestore(&nesnic->sq_lock, flags);
+			if (send_rc != NETDEV_TX_OK)
 				return NETDEV_TX_OK;
-			}
 		}
 	} else {
 		send_rc = nes_nic_send(skb, netdev);
-		if (send_rc != NETDEV_TX_OK) {
-			spin_unlock_irqrestore(&nesnic->sq_lock, flags);
+		if (send_rc != NETDEV_TX_OK)
 			return NETDEV_TX_OK;
-		}
 	}
 
 	barrier();
@@ -676,7 +661,6 @@ tso_sq_no_longer_full:
 				(wqe_count << 24) | (1 << 23) | nesvnic->nic.qp_id);
 
 	netdev->trans_start = jiffies;
-	spin_unlock_irqrestore(&nesnic->sq_lock, flags);
 
 	return NETDEV_TX_OK;
 }
@@ -1012,7 +996,6 @@ static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
 	"Pause Frames Received",
 	"Pause Frames Received",
 	"Internal Routing Errors",
 	"Internal Routing Errors",
 	"SQ SW Dropped SKBs",
 	"SQ SW Dropped SKBs",
-	"SQ Locked",
 	"SQ Full",
 	"SQ Full",
 	"Segmented TSO Requests",
 	"Segmented TSO Requests",
 	"Rx Symbol Errors",
 	"Rx Symbol Errors",
@@ -1129,16 +1112,17 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
 	struct nes_device *nesdev = nesvnic->nesdev;
 	u32 nic_count;
 	u32 u32temp;
+	u32 index = 0;
 
 	target_ethtool_stats->n_stats = NES_ETHTOOL_STAT_COUNT;
-	target_stat_values[0] = nesvnic->nesdev->link_status_interrupts;
-	target_stat_values[1] = nesvnic->linearized_skbs;
-	target_stat_values[2] = nesvnic->tso_requests;
+	target_stat_values[index] = nesvnic->nesdev->link_status_interrupts;
+	target_stat_values[++index] = nesvnic->linearized_skbs;
+	target_stat_values[++index] = nesvnic->tso_requests;
 
 	u32temp = nes_read_indexed(nesdev,
 			NES_IDX_MAC_TX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200));
 	nesvnic->nesdev->mac_pause_frames_sent += u32temp;
-	target_stat_values[3] = nesvnic->nesdev->mac_pause_frames_sent;
+	target_stat_values[++index] = nesvnic->nesdev->mac_pause_frames_sent;
 
 	u32temp = nes_read_indexed(nesdev,
 			NES_IDX_MAC_RX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200));
@@ -1209,60 +1193,59 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
 		nesvnic->endnode_ipv4_tcp_retransmits += u32temp;
 	}
 
-	target_stat_values[4] = nesvnic->nesdev->mac_pause_frames_received;
-	target_stat_values[5] = nesdev->nesadapter->nic_rx_eth_route_err;
-	target_stat_values[6] = nesvnic->tx_sw_dropped;
-	target_stat_values[7] = nesvnic->sq_locked;
-	target_stat_values[8] = nesvnic->sq_full;
-	target_stat_values[9] = nesvnic->segmented_tso_requests;
-	target_stat_values[10] = nesvnic->nesdev->mac_rx_symbol_err_frames;
-	target_stat_values[11] = nesvnic->nesdev->mac_rx_jabber_frames;
-	target_stat_values[12] = nesvnic->nesdev->mac_rx_oversized_frames;
-	target_stat_values[13] = nesvnic->nesdev->mac_rx_short_frames;
-	target_stat_values[14] = nesvnic->endnode_nstat_rx_discard;
-	target_stat_values[15] = nesvnic->endnode_nstat_rx_octets;
-	target_stat_values[16] = nesvnic->endnode_nstat_rx_frames;
-	target_stat_values[17] = nesvnic->endnode_nstat_tx_octets;
-	target_stat_values[18] = nesvnic->endnode_nstat_tx_frames;
-	target_stat_values[19] = mh_detected;
-	target_stat_values[20] = mh_pauses_sent;
-	target_stat_values[21] = nesvnic->endnode_ipv4_tcp_retransmits;
-	target_stat_values[22] = atomic_read(&cm_connects);
-	target_stat_values[23] = atomic_read(&cm_accepts);
-	target_stat_values[24] = atomic_read(&cm_disconnects);
-	target_stat_values[25] = atomic_read(&cm_connecteds);
-	target_stat_values[26] = atomic_read(&cm_connect_reqs);
-	target_stat_values[27] = atomic_read(&cm_rejects);
-	target_stat_values[28] = atomic_read(&mod_qp_timouts);
-	target_stat_values[29] = atomic_read(&qps_created);
-	target_stat_values[30] = atomic_read(&sw_qps_destroyed);
-	target_stat_values[31] = atomic_read(&qps_destroyed);
-	target_stat_values[32] = atomic_read(&cm_closes);
-	target_stat_values[33] = cm_packets_sent;
-	target_stat_values[34] = cm_packets_bounced;
-	target_stat_values[35] = cm_packets_created;
-	target_stat_values[36] = cm_packets_received;
-	target_stat_values[37] = cm_packets_dropped;
-	target_stat_values[38] = cm_packets_retrans;
-	target_stat_values[39] = cm_listens_created;
-	target_stat_values[40] = cm_listens_destroyed;
-	target_stat_values[41] = cm_backlog_drops;
-	target_stat_values[42] = atomic_read(&cm_loopbacks);
-	target_stat_values[43] = atomic_read(&cm_nodes_created);
-	target_stat_values[44] = atomic_read(&cm_nodes_destroyed);
-	target_stat_values[45] = atomic_read(&cm_accel_dropped_pkts);
-	target_stat_values[46] = atomic_read(&cm_resets_recvd);
-	target_stat_values[47] = int_mod_timer_init;
-	target_stat_values[48] = int_mod_cq_depth_1;
-	target_stat_values[49] = int_mod_cq_depth_4;
-	target_stat_values[50] = int_mod_cq_depth_16;
-	target_stat_values[51] = int_mod_cq_depth_24;
-	target_stat_values[52] = int_mod_cq_depth_32;
-	target_stat_values[53] = int_mod_cq_depth_128;
-	target_stat_values[54] = int_mod_cq_depth_256;
-	target_stat_values[55] = nesvnic->lro_mgr.stats.aggregated;
-	target_stat_values[56] = nesvnic->lro_mgr.stats.flushed;
-	target_stat_values[57] = nesvnic->lro_mgr.stats.no_desc;
+	target_stat_values[++index] = nesvnic->nesdev->mac_pause_frames_received;
+	target_stat_values[++index] = nesdev->nesadapter->nic_rx_eth_route_err;
+	target_stat_values[++index] = nesvnic->tx_sw_dropped;
+	target_stat_values[++index] = nesvnic->sq_full;
+	target_stat_values[++index] = nesvnic->segmented_tso_requests;
+	target_stat_values[++index] = nesvnic->nesdev->mac_rx_symbol_err_frames;
+	target_stat_values[++index] = nesvnic->nesdev->mac_rx_jabber_frames;
+	target_stat_values[++index] = nesvnic->nesdev->mac_rx_oversized_frames;
+	target_stat_values[++index] = nesvnic->nesdev->mac_rx_short_frames;
+	target_stat_values[++index] = nesvnic->endnode_nstat_rx_discard;
+	target_stat_values[++index] = nesvnic->endnode_nstat_rx_octets;
+	target_stat_values[++index] = nesvnic->endnode_nstat_rx_frames;
+	target_stat_values[++index] = nesvnic->endnode_nstat_tx_octets;
+	target_stat_values[++index] = nesvnic->endnode_nstat_tx_frames;
+	target_stat_values[++index] = mh_detected;
+	target_stat_values[++index] = mh_pauses_sent;
+	target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
+	target_stat_values[++index] = atomic_read(&cm_connects);
+	target_stat_values[++index] = atomic_read(&cm_accepts);
+	target_stat_values[++index] = atomic_read(&cm_disconnects);
+	target_stat_values[++index] = atomic_read(&cm_connecteds);
+	target_stat_values[++index] = atomic_read(&cm_connect_reqs);
+	target_stat_values[++index] = atomic_read(&cm_rejects);
+	target_stat_values[++index] = atomic_read(&mod_qp_timouts);
+	target_stat_values[++index] = atomic_read(&qps_created);
+	target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
+	target_stat_values[++index] = atomic_read(&qps_destroyed);
+	target_stat_values[++index] = atomic_read(&cm_closes);
+	target_stat_values[++index] = cm_packets_sent;
+	target_stat_values[++index] = cm_packets_bounced;
+	target_stat_values[++index] = cm_packets_created;
+	target_stat_values[++index] = cm_packets_received;
+	target_stat_values[++index] = cm_packets_dropped;
+	target_stat_values[++index] = cm_packets_retrans;
+	target_stat_values[++index] = cm_listens_created;
+	target_stat_values[++index] = cm_listens_destroyed;
+	target_stat_values[++index] = cm_backlog_drops;
+	target_stat_values[++index] = atomic_read(&cm_loopbacks);
+	target_stat_values[++index] = atomic_read(&cm_nodes_created);
+	target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
+	target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
+	target_stat_values[++index] = atomic_read(&cm_resets_recvd);
+	target_stat_values[++index] = int_mod_timer_init;
+	target_stat_values[++index] = int_mod_cq_depth_1;
+	target_stat_values[++index] = int_mod_cq_depth_4;
+	target_stat_values[++index] = int_mod_cq_depth_16;
+	target_stat_values[++index] = int_mod_cq_depth_24;
+	target_stat_values[++index] = int_mod_cq_depth_32;
+	target_stat_values[++index] = int_mod_cq_depth_128;
+	target_stat_values[++index] = int_mod_cq_depth_256;
+	target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
+	target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
+	target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
 
 }
 
@@ -1616,7 +1599,6 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
 	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
 	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
-	netdev->features |= NETIF_F_LLTX;
 
 	/* Fill in the port structure */
 	nesvnic->netdev = netdev;

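The nes_nic.c hunks above drop the driver-private TX spinlock (and with it NETIF_F_LLTX and the "SQ Locked" counter), deferring to the netdev core's TX locking, and switch the ethtool statistics to incremental indexing so removing an entry from the string table no longer forces renumbering every value slot. A minimal standalone sketch of that indexing pattern (illustrative only, not driver code):

/*
 * Why incremental indexing keeps ethtool stat values aligned with the
 * string table: dropping the "SQ Locked" entry from both arrays needs
 * no renumbering anywhere else.
 */
#include <stdio.h>

static const char *stat_names[] = {
	"SQ SW Dropped SKBs",
	"SQ Full",		/* "SQ Locked" removed along with the lock */
	"Segmented TSO Requests",
};

int main(void)
{
	unsigned long values[3];
	unsigned int index = 0;

	values[index] = 1;	/* first slot uses index, not ++index */
	values[++index] = 2;
	values[++index] = 3;

	for (index = 0; index < 3; index++)
		printf("%s: %lu\n", stat_names[index], values[index]);
	return 0;
}
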
+ 1 - 1
drivers/infiniband/hw/nes/nes_user.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2008 NetEffect.  All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc.  All rights reserved.
  * Copyright (c) 2005 Topspin Communications.  All rights reserved.
  * Copyright (c) 2005 Cisco Systems.  All rights reserved.
  * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.

+ 1 - 1
drivers/infiniband/hw/nes/nes_utils.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU

+ 170 - 79
drivers/infiniband/hw/nes/nes_verbs.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -551,6 +551,7 @@ static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
 	struct nes_device *nesdev = nesvnic->nesdev;
 	struct nes_adapter *nesadapter = nesdev->nesadapter;
 	int i = 0;
+	int rc;
 
 	/* free the resources */
 	if (nesfmr->leaf_pbl_cnt == 0) {
@@ -572,7 +573,9 @@ static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
 	nesmr->ibmw.rkey = ibfmr->rkey;
 	nesmr->ibmw.uobject = NULL;
 
-	if (nesfmr->nesmr.pbls_used != 0) {
+	rc = nes_dealloc_mw(&nesmr->ibmw);
+
+	if ((rc == 0) && (nesfmr->nesmr.pbls_used != 0)) {
 		spin_lock_irqsave(&nesadapter->pbl_lock, flags);
 		if (nesfmr->nesmr.pbl_4k) {
 			nesadapter->free_4kpbl += nesfmr->nesmr.pbls_used;
@@ -584,7 +587,7 @@ static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
 		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
 	}
 
-	return nes_dealloc_mw(&nesmr->ibmw);
+	return rc;
 }
 
 
@@ -1884,21 +1887,75 @@ static int nes_destroy_cq(struct ib_cq *ib_cq)
 	return ret;
 }
 
+/**
+ * root_256
+ */
+static u32 root_256(struct nes_device *nesdev,
+		    struct nes_root_vpbl *root_vpbl,
+		    struct nes_root_vpbl *new_root,
+		    u16 pbl_count_4k,
+		    u16 pbl_count_256)
+{
+	u64 leaf_pbl;
+	int i, j, k;
+
+	if (pbl_count_4k == 1) {
+		new_root->pbl_vbase = pci_alloc_consistent(nesdev->pcidev,
+						512, &new_root->pbl_pbase);
+
+		if (new_root->pbl_vbase == NULL)
+			return 0;
+
+		leaf_pbl = (u64)root_vpbl->pbl_pbase;
+		for (i = 0; i < 16; i++) {
+			new_root->pbl_vbase[i].pa_low =
+				cpu_to_le32((u32)leaf_pbl);
+			new_root->pbl_vbase[i].pa_high =
+				cpu_to_le32((u32)((((u64)leaf_pbl) >> 32)));
+			leaf_pbl += 256;
+		}
+	} else {
+		for (i = 3; i >= 0; i--) {
+			j = i * 16;
+			root_vpbl->pbl_vbase[j] = root_vpbl->pbl_vbase[i];
+			leaf_pbl = le32_to_cpu(root_vpbl->pbl_vbase[j].pa_low) +
+			    (((u64)le32_to_cpu(root_vpbl->pbl_vbase[j].pa_high))
+				<< 32);
+			for (k = 1; k < 16; k++) {
+				leaf_pbl += 256;
+				root_vpbl->pbl_vbase[j + k].pa_low =
+						cpu_to_le32((u32)leaf_pbl);
+				root_vpbl->pbl_vbase[j + k].pa_high =
+				    cpu_to_le32((u32)((((u64)leaf_pbl) >> 32)));
+			}
+		}
+	}
+
+	return 1;
+}
+
 
 /**
  * nes_reg_mr
  */
 static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
 		u32 stag, u64 region_length, struct nes_root_vpbl *root_vpbl,
-		dma_addr_t single_buffer, u16 pbl_count, u16 residual_page_count,
-		int acc, u64 *iova_start)
+		dma_addr_t single_buffer, u16 pbl_count_4k,
+		u16 residual_page_count_4k, int acc, u64 *iova_start,
+		u16 *actual_pbl_cnt, u8 *used_4k_pbls)
 {
 	struct nes_hw_cqp_wqe *cqp_wqe;
 	struct nes_cqp_request *cqp_request;
 	unsigned long flags;
 	int ret;
 	struct nes_adapter *nesadapter = nesdev->nesadapter;
-	/* int count; */
+	uint pg_cnt = 0;
+	u16 pbl_count_256;
+	u16 pbl_count = 0;
+	u8  use_256_pbls = 0;
+	u8  use_4k_pbls = 0;
+	u16 use_two_level = (pbl_count_4k > 1) ? 1 : 0;
+	struct nes_root_vpbl new_root = {0, 0, 0};
 	u32 opcode = 0;
 	u16 major_code;
 
@@ -1911,41 +1968,70 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
 	cqp_request->waiting = 1;
 	cqp_wqe = &cqp_request->cqp_wqe;
 
-	spin_lock_irqsave(&nesadapter->pbl_lock, flags);
-	/* track PBL resources */
-	if (pbl_count != 0) {
-		if (pbl_count > 1) {
-			/* Two level PBL */
-			if ((pbl_count+1) > nesadapter->free_4kpbl) {
-				nes_debug(NES_DBG_MR, "Out of 4KB Pbls for two level request.\n");
-				spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
-				nes_free_cqp_request(nesdev, cqp_request);
-				return -ENOMEM;
-			} else {
-				nesadapter->free_4kpbl -= pbl_count+1;
-			}
-		} else if (residual_page_count > 32) {
-			if (pbl_count > nesadapter->free_4kpbl) {
-				nes_debug(NES_DBG_MR, "Out of 4KB Pbls.\n");
-				spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
-				nes_free_cqp_request(nesdev, cqp_request);
-				return -ENOMEM;
-			} else {
-				nesadapter->free_4kpbl -= pbl_count;
+	if (pbl_count_4k) {
+		spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+
+		pg_cnt = ((pbl_count_4k - 1) * 512) + residual_page_count_4k;
+		pbl_count_256 = (pg_cnt + 31) / 32;
+		if (pg_cnt <= 32) {
+			if (pbl_count_256 <= nesadapter->free_256pbl)
+				use_256_pbls = 1;
+			else if (pbl_count_4k <= nesadapter->free_4kpbl)
+				use_4k_pbls = 1;
+		} else if (pg_cnt <= 2048) {
+			if (((pbl_count_4k + use_two_level) <= nesadapter->free_4kpbl) &&
+			    (nesadapter->free_4kpbl > (nesadapter->max_4kpbl >> 1))) {
+				use_4k_pbls = 1;
+			} else if ((pbl_count_256 + 1) <= nesadapter->free_256pbl) {
+				use_256_pbls = 1;
+				use_two_level = 1;
+			} else if ((pbl_count_4k + use_two_level) <= nesadapter->free_4kpbl) {
+				use_4k_pbls = 1;
 			}
 		} else {
-			if (pbl_count > nesadapter->free_256pbl) {
-				nes_debug(NES_DBG_MR, "Out of 256B Pbls.\n");
-				spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
-				nes_free_cqp_request(nesdev, cqp_request);
-				return -ENOMEM;
-			} else {
-				nesadapter->free_256pbl -= pbl_count;
-			}
+			if ((pbl_count_4k + 1) <= nesadapter->free_4kpbl)
+				use_4k_pbls = 1;
 		}
+
+		if (use_256_pbls) {
+			pbl_count = pbl_count_256;
+			nesadapter->free_256pbl -= pbl_count + use_two_level;
+		} else if (use_4k_pbls) {
+			pbl_count =  pbl_count_4k;
+			nesadapter->free_4kpbl -= pbl_count + use_two_level;
+		} else {
+			spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+			nes_debug(NES_DBG_MR, "Out of Pbls\n");
+			nes_free_cqp_request(nesdev, cqp_request);
+			return -ENOMEM;
+		}
+
+		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
 	}
 
-	spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+	if (use_256_pbls && use_two_level) {
+		if (root_256(nesdev, root_vpbl, &new_root, pbl_count_4k, pbl_count_256) == 1) {
+			if (new_root.pbl_pbase != 0)
+				root_vpbl = &new_root;
+		} else {
+			spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+			nesadapter->free_256pbl += pbl_count_256 + use_two_level;
+			use_256_pbls = 0;
+
+			if (pbl_count_4k == 1)
+				use_two_level = 0;
+			pbl_count = pbl_count_4k;
+
+			if ((pbl_count_4k + use_two_level) <= nesadapter->free_4kpbl) {
+				nesadapter->free_4kpbl -= pbl_count + use_two_level;
+				use_4k_pbls = 1;
+			}
+			spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+
+			if (use_4k_pbls == 0)
+				return -ENOMEM;
+		}
+	}
 
 	opcode = NES_CQP_REGISTER_STAG | NES_CQP_STAG_RIGHTS_LOCAL_READ |
 					NES_CQP_STAG_VA_TO | NES_CQP_STAG_MR;
@@ -1974,10 +2060,9 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
 	} else {
 		set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PA_LOW_IDX, root_vpbl->pbl_pbase);
 		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX, pbl_count);
-		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_LEN_IDX,
-				(((pbl_count - 1) * 4096) + (residual_page_count*8)));
+		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_LEN_IDX, (pg_cnt * 8));
 
-		if ((pbl_count > 1) || (residual_page_count > 32))
+		if (use_4k_pbls)
 			cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(NES_CQP_STAG_PBL_BLK_SIZE);
 	}
 	barrier();
@@ -1994,13 +2079,25 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
 	major_code = cqp_request->major_code;
 	nes_put_cqp_request(nesdev, cqp_request);
 
+	if ((!ret || major_code) && pbl_count != 0) {
+		spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+		if (use_256_pbls)
+			nesadapter->free_256pbl += pbl_count + use_two_level;
+		else if (use_4k_pbls)
+			nesadapter->free_4kpbl += pbl_count + use_two_level;
+		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+	}
+	if (new_root.pbl_pbase)
+		pci_free_consistent(nesdev->pcidev, 512, new_root.pbl_vbase,
+				    new_root.pbl_pbase);
+
 	if (!ret)
 		return -ETIME;
 	else if (major_code)
 		return -EIO;
-	else
-		return 0;
 
+	*actual_pbl_cnt = pbl_count + use_two_level;
+	*used_4k_pbls = use_4k_pbls;
 	return 0;
 }
 
@@ -2165,18 +2262,14 @@ static struct ib_mr *nes_reg_phys_mr(struct ib_pd *ib_pd,
 		pbl_count = root_pbl_index;
 	}
 	ret = nes_reg_mr(nesdev, nespd, stag, region_length, &root_vpbl,
-			buffer_list[0].addr, pbl_count, (u16)cur_pbl_index, acc, iova_start);
+			buffer_list[0].addr, pbl_count, (u16)cur_pbl_index, acc, iova_start,
+			&nesmr->pbls_used, &nesmr->pbl_4k);
 
 	if (ret == 0) {
 		nesmr->ibmr.rkey = stag;
 		nesmr->ibmr.lkey = stag;
 		nesmr->mode = IWNES_MEMREG_TYPE_MEM;
 		ibmr = &nesmr->ibmr;
-		nesmr->pbl_4k = ((pbl_count > 1) || (cur_pbl_index > 32)) ? 1 : 0;
-		nesmr->pbls_used = pbl_count;
-		if (pbl_count > 1) {
-			nesmr->pbls_used++;
-		}
 	} else {
 		kfree(nesmr);
 		ibmr = ERR_PTR(-ENOMEM);
@@ -2454,8 +2547,9 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 					stag, (unsigned int)iova_start,
 					(unsigned int)region_length, stag_index,
 					(unsigned long long)region->length, pbl_count);
-			ret = nes_reg_mr( nesdev, nespd, stag, region->length, &root_vpbl,
-					first_dma_addr, pbl_count, (u16)cur_pbl_index, acc, &iova_start);
+			ret = nes_reg_mr(nesdev, nespd, stag, region->length, &root_vpbl,
+					 first_dma_addr, pbl_count, (u16)cur_pbl_index, acc,
+					 &iova_start, &nesmr->pbls_used, &nesmr->pbl_4k);
 
 			nes_debug(NES_DBG_MR, "ret=%d\n", ret);
 
@@ -2464,11 +2558,6 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				nesmr->ibmr.lkey = stag;
 				nesmr->mode = IWNES_MEMREG_TYPE_MEM;
 				ibmr = &nesmr->ibmr;
-				nesmr->pbl_4k = ((pbl_count > 1) || (cur_pbl_index > 32)) ? 1 : 0;
-				nesmr->pbls_used = pbl_count;
-				if (pbl_count > 1) {
-					nesmr->pbls_used++;
-				}
 			} else {
 				ib_umem_release(region);
 				kfree(nesmr);
@@ -2607,24 +2696,6 @@ static int nes_dereg_mr(struct ib_mr *ib_mr)
 	cqp_request->waiting = 1;
 	cqp_wqe = &cqp_request->cqp_wqe;
 
-	spin_lock_irqsave(&nesadapter->pbl_lock, flags);
-	if (nesmr->pbls_used != 0) {
-		if (nesmr->pbl_4k) {
-			nesadapter->free_4kpbl += nesmr->pbls_used;
-			if (nesadapter->free_4kpbl > nesadapter->max_4kpbl) {
-				printk(KERN_ERR PFX "free 4KB PBLs(%u) has exceeded the max(%u)\n",
-						nesadapter->free_4kpbl, nesadapter->max_4kpbl);
-			}
-		} else {
-			nesadapter->free_256pbl += nesmr->pbls_used;
-			if (nesadapter->free_256pbl > nesadapter->max_256pbl) {
-				printk(KERN_ERR PFX "free 256B PBLs(%u) has exceeded the max(%u)\n",
-						nesadapter->free_256pbl, nesadapter->max_256pbl);
-			}
-		}
-	}
-
-	spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
 	nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
 	set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
 			NES_CQP_DEALLOCATE_STAG | NES_CQP_STAG_VA_TO |
@@ -2642,11 +2713,6 @@ static int nes_dereg_mr(struct ib_mr *ib_mr)
 			" CQP Major:Minor codes = 0x%04X:0x%04X\n",
 			" CQP Major:Minor codes = 0x%04X:0x%04X\n",
 			ib_mr->rkey, ret, cqp_request->major_code, cqp_request->minor_code);
 			ib_mr->rkey, ret, cqp_request->major_code, cqp_request->minor_code);
 
 
-	nes_free_resource(nesadapter, nesadapter->allocated_mrs,
-			(ib_mr->rkey & 0x0fffff00) >> 8);
-
-	kfree(nesmr);
-
 	major_code = cqp_request->major_code;
 	minor_code = cqp_request->minor_code;
 
@@ -2662,8 +2728,33 @@ static int nes_dereg_mr(struct ib_mr *ib_mr)
 				" to destroy STag, ib_mr=%p, rkey = 0x%08X\n",
 				" to destroy STag, ib_mr=%p, rkey = 0x%08X\n",
 				major_code, minor_code, ib_mr, ib_mr->rkey);
 				major_code, minor_code, ib_mr, ib_mr->rkey);
 		return -EIO;
 		return -EIO;
-	} else
-		return 0;
+	}
+
+	if (nesmr->pbls_used != 0) {
+		spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+		if (nesmr->pbl_4k) {
+			nesadapter->free_4kpbl += nesmr->pbls_used;
+			if (nesadapter->free_4kpbl > nesadapter->max_4kpbl)
+				printk(KERN_ERR PFX "free 4KB PBLs(%u) has "
+					"exceeded the max(%u)\n",
+					nesadapter->free_4kpbl,
+					nesadapter->max_4kpbl);
+		} else {
+			nesadapter->free_256pbl += nesmr->pbls_used;
+			if (nesadapter->free_256pbl > nesadapter->max_256pbl)
+				printk(KERN_ERR PFX "free 256B PBLs(%u) has "
+					"exceeded the max(%u)\n",
+					nesadapter->free_256pbl,
+					nesadapter->max_256pbl);
+		}
+		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+	}
+	nes_free_resource(nesadapter, nesadapter->allocated_mrs,
+			(ib_mr->rkey & 0x0fffff00) >> 8);
+
+	kfree(nesmr);
+
+	return 0;
 }
 
 

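The reworked nes_reg_mr() above now chooses between 256-byte and 4 KB physical buffer lists at registration time. The conversion rests on simple arithmetic: a 4 KB PBL holds 512 eight-byte page addresses and a 256-byte PBL holds 32, so the page count implied by the caller's 4 KB layout maps directly onto an equivalent 256-byte PBL count. A standalone sketch of that arithmetic (illustrative only; the example inputs are made up):

/*
 * PBL sizing as used by the new allocation strategy: pg_cnt is the
 * total page count implied by a 4 KB PBL layout, and pbl_count_256 is
 * the same set of pages rounded up into 32-entry 256 B PBLs.
 */
#include <stdio.h>

int main(void)
{
	unsigned int pbl_count_4k = 2;		/* hypothetical inputs */
	unsigned int residual_page_count_4k = 40;
	unsigned int pg_cnt, pbl_count_256;

	pg_cnt = (pbl_count_4k - 1) * 512 + residual_page_count_4k;
	pbl_count_256 = (pg_cnt + 31) / 32;	/* round up to 32-entry PBLs */

	printf("pages=%u -> 256B PBLs=%u\n", pg_cnt, pbl_count_256);
	return 0;
}
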
+ 1 - 1
drivers/infiniband/hw/nes/nes_verbs.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc.  All rights reserved.
  * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two

+ 7 - 2
drivers/infiniband/ulp/ipoib/ipoib_main.c

@@ -660,8 +660,12 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
 
 	path = __path_find(dev, phdr->hwaddr + 4);
 	if (!path || !path->valid) {
-		if (!path)
+		int new_path = 0;
+
+		if (!path) {
 			path = path_rec_create(dev, phdr->hwaddr + 4);
+			new_path = 1;
+		}
 		if (path) {
 			/* put pseudoheader back on for next time */
 			skb_push(skb, sizeof *phdr);
@@ -669,7 +673,8 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
 
 			if (!path->query && path_rec_start(dev, path)) {
 				spin_unlock_irqrestore(&priv->lock, flags);
-				path_free(dev, path);
+				if (new_path)
+					path_free(dev, path);
 				return;
 			} else
 				__path_add(dev, path);

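The ipoib_main.c fix above enforces an ownership rule: on a path_rec_start() failure the path may only be freed if this call path created it, because a path returned by __path_find() is still owned by the device's path list. A simplified standalone sketch of the pattern (hypothetical helper names, not the ipoib API):

#include <stdlib.h>

struct path { int id; };

static struct path *shared_list_entry;	/* stands in for the device's path list */

static struct path *lookup(void) { return shared_list_entry; }
static struct path *create(void) { return malloc(sizeof(struct path)); }
static int start(struct path *p) { (void)p; return -1; /* simulate failure */ }

static void send_one(void)
{
	int new_path = 0;
	struct path *p = lookup();

	if (!p) {
		p = create();
		new_path = 1;
	}
	if (p && start(p) != 0) {
		if (new_path)
			free(p);	/* free only what this call allocated */
		/* a looked-up path remains owned by the shared list */
	}
}

int main(void)
{
	send_one();
	return 0;
}
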
+ 0 - 7
drivers/infiniband/ulp/iser/iser_verbs.c

@@ -401,13 +401,6 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
 	if (ret)
 		goto failure;
 
-	iser_dbg("path.mtu is %d setting it to %d\n",
-		 cma_id->route.path_rec->mtu, IB_MTU_1024);
-
-	/* we must set the MTU to 1024 as this is what the target is assuming */
-	if (cma_id->route.path_rec->mtu > IB_MTU_1024)
-		cma_id->route.path_rec->mtu = IB_MTU_1024;
-
 	memset(&conn_param, 0, sizeof conn_param);
 	conn_param.responder_resources = 4;
 	conn_param.initiator_depth     = 1;

+ 1 - 1
drivers/net/mlx4/Makefile

@@ -1,7 +1,7 @@
 obj-$(CONFIG_MLX4_CORE)		+= mlx4_core.o
 
 mlx4_core-y :=	alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-		mr.o pd.o port.o profile.o qp.o reset.o srq.o
+		mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o
 
 obj-$(CONFIG_MLX4_EN)               += mlx4_en.o
 

+ 2 - 14
drivers/net/mlx4/catas.c

@@ -42,7 +42,6 @@ enum {
 static DEFINE_SPINLOCK(catas_lock);
 
 static LIST_HEAD(catas_list);
-static struct workqueue_struct *catas_wq;
 static struct work_struct catas_work;
 
 static int internal_err_reset = 1;
@@ -77,7 +76,7 @@ static void poll_catas(unsigned long dev_ptr)
 			list_add(&priv->catas_err.list, &catas_list);
 			spin_unlock(&catas_lock);
 
-			queue_work(catas_wq, &catas_work);
+			queue_work(mlx4_wq, &catas_work);
 		}
 	} else
 		mod_timer(&priv->catas_err.timer,
@@ -146,18 +145,7 @@ void mlx4_stop_catas_poll(struct mlx4_dev *dev)
 	spin_unlock_irq(&catas_lock);
 }
 
-int __init mlx4_catas_init(void)
+void  __init mlx4_catas_init(void)
 {
 	INIT_WORK(&catas_work, catas_reset);
-
-	catas_wq = create_singlethread_workqueue("mlx4_err");
-	if (!catas_wq)
-		return -ENOMEM;
-
-	return 0;
-}
-
-void mlx4_catas_cleanup(void)
-{
-	destroy_workqueue(catas_wq);
 }

+ 11 - 5
drivers/net/mlx4/eq.c

@@ -163,6 +163,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 	int cqn;
 	int cqn;
 	int eqes_found = 0;
 	int set_ci = 0;
+	int port;
 
 	while ((eqe = next_eqe_sw(eq))) {
 		/*
 			break;
 			break;
 
 		case MLX4_EVENT_TYPE_PORT_CHANGE:
-					    eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_ACTIVE ?
-					    MLX4_DEV_EVENT_PORT_UP :
-					    MLX4_DEV_EVENT_PORT_DOWN,
-					    be32_to_cpu(eqe->event.port_change.port) >> 28);
+			port = be32_to_cpu(eqe->event.port_change.port) >> 28;
+			if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
+				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
+						    port);
+				mlx4_priv(dev)->sense.do_sense_port[port] = 1;
+			} else {
+				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
+						    port);
+				mlx4_priv(dev)->sense.do_sense_port[port] = 0;
+			}
 			break;
 			break;
 
 		case MLX4_EVENT_TYPE_CQ_ERROR:
+ 79 - 27
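In the eq.c hunk above, the port number arrives in the top four bits of a big-endian 32-bit EQE field, which is why it is extracted with be32_to_cpu(...) >> 28 before indexing do_sense_port[]. A tiny userspace sketch of that extraction (using ntohl()/htonl() as stand-ins for the kernel byte-order helpers):

/*
 * Extracting a 4-bit port number from the top nibble of a big-endian
 * 32-bit word, as the mlx4 port-change EQE encodes it.
 */
#include <arpa/inet.h>	/* ntohl()/htonl(), stand-ins for be32_to_cpu()/cpu_to_be32() */
#include <stdio.h>

int main(void)
{
	/* port 2 encoded in the top nibble, as the HCA would write it */
	unsigned int raw = htonl(2u << 28);
	unsigned int port = ntohl(raw) >> 28;

	printf("port = %u\n", port);	/* prints 2 */
	return 0;
}
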
drivers/net/mlx4/main.c

@@ -51,6 +51,8 @@ MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRV_VERSION);
 MODULE_VERSION(DRV_VERSION);
 
 
+struct workqueue_struct *mlx4_wq;
+
 #ifdef CONFIG_MLX4_DEBUG
 #ifdef CONFIG_MLX4_DEBUG
 
 
 int mlx4_debug_level = 0;
 int mlx4_debug_level = 0;
@@ -98,24 +100,23 @@ module_param_named(use_prio, use_prio, bool, 0444);
 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
 		  "(0/1, default 0)");
 
-static int mlx4_check_port_params(struct mlx4_dev *dev,
-				  enum mlx4_port_type *port_type)
+int mlx4_check_port_params(struct mlx4_dev *dev,
+			   enum mlx4_port_type *port_type)
 {
 	int i;
 
 	for (i = 0; i < dev->caps.num_ports - 1; i++) {
-		if (port_type[i] != port_type[i+1] &&
-		    !(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
-			mlx4_err(dev, "Only same port types supported "
-				 "on this HCA, aborting.\n");
-			return -EINVAL;
+		if (port_type[i] != port_type[i + 1]) {
+			if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
+				mlx4_err(dev, "Only same port types supported "
+					 "on this HCA, aborting.\n");
+				return -EINVAL;
+			}
+			if (port_type[i] == MLX4_PORT_TYPE_ETH &&
+			    port_type[i + 1] == MLX4_PORT_TYPE_IB)
+				return -EINVAL;
 		}
 	}
-	if ((port_type[0] == MLX4_PORT_TYPE_ETH) &&
-	    (port_type[1] == MLX4_PORT_TYPE_IB)) {
-		mlx4_err(dev, "eth-ib configuration is not supported.\n");
-		return -EINVAL;
-	}
 
 	for (i = 0; i < dev->caps.num_ports; i++) {
 		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
 			dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
 			dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
 		else
 			dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
+		dev->caps.possible_type[i] = dev->caps.port_type[i];
+		mlx4_priv(dev)->sense.sense_allowed[i] =
+			dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO;
 
 		if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
 			dev->caps.log_num_macs = dev_cap->log_max_macs[i];
  * Change the port configuration of the device.
  * Change the port configuration of the device.
  * Every user of this function must hold the port mutex.
  */
-static int mlx4_change_port_types(struct mlx4_dev *dev,
-				  enum mlx4_port_type *port_types)
+int mlx4_change_port_types(struct mlx4_dev *dev,
+			   enum mlx4_port_type *port_types)
 {
 	int err = 0;
 	int change = 0;
 	int port;
 
 	for (port = 0; port <  dev->caps.num_ports; port++) {
+		/* Change the port type only if the new type is different
+		 * from the current, and not set to Auto */
 		if (port_types[port] != dev->caps.port_type[port + 1]) {
 			change = 1;
 			dev->caps.port_type[port + 1] = port_types[port];
 	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
 	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
 						   port_attr);
 	struct mlx4_dev *mdev = info->dev;
+	char type[8];
+
+	sprintf(type, "%s",
+		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
+		"ib" : "eth");
+	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
+		sprintf(buf, "auto (%s)\n", type);
+	else
+		sprintf(buf, "%s\n", type);
 
-	return sprintf(buf, "%s\n",
-		       mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB ?
-		       "ib" : "eth");
+	return strlen(buf);
 }
 
 static ssize_t set_port_type(struct device *dev,
 	struct mlx4_dev *mdev = info->dev;
 	struct mlx4_dev *mdev = info->dev;
 	struct mlx4_priv *priv = mlx4_priv(mdev);
 	enum mlx4_port_type types[MLX4_MAX_PORTS];
+	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
 	int i;
 	int err = 0;
 
 		info->tmp_type = MLX4_PORT_TYPE_IB;
 		info->tmp_type = MLX4_PORT_TYPE_IB;
 	else if (!strcmp(buf, "eth\n"))
 		info->tmp_type = MLX4_PORT_TYPE_ETH;
+	else if (!strcmp(buf, "auto\n"))
+		info->tmp_type = MLX4_PORT_TYPE_AUTO;
 	else {
 		mlx4_err(mdev, "%s is not supported port type\n", buf);
 		return -EINVAL;
 	}
 
+	mlx4_stop_sense(mdev);
 	mutex_lock(&priv->port_mutex);
-	for (i = 0; i < mdev->caps.num_ports; i++)
+	/* Possible type is always the one that was delivered */
+	mdev->caps.possible_type[info->port] = info->tmp_type;
+
+	for (i = 0; i < mdev->caps.num_ports; i++) {
 		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
-					mdev->caps.port_type[i+1];
+					mdev->caps.possible_type[i+1];
+		if (types[i] == MLX4_PORT_TYPE_AUTO)
+			types[i] = mdev->caps.port_type[i+1];
+	}
 
-	err = mlx4_check_port_params(mdev, types);
+	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
+		for (i = 1; i <= mdev->caps.num_ports; i++) {
+			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
+				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
+				err = -EINVAL;
+			}
+		}
+	}
+	if (err) {
+		mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
+			       "Set only 'eth' or 'ib' for both ports "
+			       "(should be the same)\n");
+		goto out;
+	}
+
+	mlx4_do_sense_ports(mdev, new_types, types);
+
+	err = mlx4_check_port_params(mdev, new_types);
 	if (err)
 		goto out;
 
-	for (i = 1; i <= mdev->caps.num_ports; i++)
-		priv->port[i].tmp_type = 0;
+	/* We are about to apply the changes after the configuration
+	 * was verified, no need to remember the temporary types
+	 * any more */
+	for (i = 0; i < mdev->caps.num_ports; i++)
+		priv->port[i + 1].tmp_type = 0;
 
-	err = mlx4_change_port_types(mdev, types);
+	err = mlx4_change_port_types(mdev, new_types);
 
 out:
+	mlx4_start_sense(mdev);
 	mutex_unlock(&priv->port_mutex);
 	return err ? err : count;
 }
@@ -1117,6 +1161,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (err)
 		goto err_port;
 
+	mlx4_sense_init(dev);
+	mlx4_start_sense(dev);
+
 	pci_set_drvdata(pdev, dev);
 
 	return 0;
@@ -1182,6 +1229,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
 	int p;
 
 	if (dev) {
+		mlx4_stop_sense(dev);
 		mlx4_unregister_device(dev);
 
 		for (p = 1; p <= dev->caps.num_ports; p++) {
@@ -1230,6 +1278,8 @@ static struct pci_device_id mlx4_pci_table[] = {
 	{ PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
 	{ PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
 	{ PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
+	{ PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
+	{ PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
 	{ 0, }
 };
 
@@ -1264,9 +1314,11 @@ static int __init mlx4_init(void)
 	if (mlx4_verify_params())
 		return -EINVAL;
 
-	ret = mlx4_catas_init();
-	if (ret)
-		return ret;
+	mlx4_catas_init();
+
+	mlx4_wq = create_singlethread_workqueue("mlx4");
+	if (!mlx4_wq)
+		return -ENOMEM;
 
 	ret = pci_register_driver(&mlx4_driver);
 	return ret < 0 ? ret : 0;
@@ -1275,7 +1327,7 @@ static int __init mlx4_init(void)
 static void __exit mlx4_cleanup(void)
 static void __exit mlx4_cleanup(void)
 {
 {
 	pci_unregister_driver(&mlx4_driver);
 	pci_unregister_driver(&mlx4_driver);
-	mlx4_catas_cleanup();
+	destroy_workqueue(mlx4_wq);
 }
 }
 
 
 module_init(mlx4_init);
 module_init(mlx4_init);

+ 25 - 2
drivers/net/mlx4/mlx4.h

@@ -40,6 +40,7 @@
 #include <linux/mutex.h>
 #include <linux/radix-tree.h>
 #include <linux/timer.h>
+#include <linux/workqueue.h>
 
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/driver.h>
@@ -276,6 +277,13 @@ struct mlx4_port_info {
 	struct mlx4_vlan_table	vlan_table;
 };
 
+struct mlx4_sense {
+	struct mlx4_dev		*dev;
+	u8			do_sense_port[MLX4_MAX_PORTS + 1];
+	u8			sense_allowed[MLX4_MAX_PORTS + 1];
+	struct delayed_work	sense_poll;
+};
+
 struct mlx4_priv {
 	struct mlx4_dev		dev;
 
@@ -305,6 +313,7 @@ struct mlx4_priv {
 	struct mlx4_uar		driver_uar;
 	void __iomem	       *kar;
 	struct mlx4_port_info	port[MLX4_MAX_PORTS + 1];
+	struct mlx4_sense       sense;
 	struct mutex		port_mutex;
 };
 
@@ -313,6 +322,10 @@ static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
 	return container_of(dev, struct mlx4_priv, dev);
 }
 
+#define MLX4_SENSE_RANGE	(HZ * 3)
+
+extern struct workqueue_struct *mlx4_wq;
+
 u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
 void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
 u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
@@ -346,8 +359,7 @@ void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
 
 void mlx4_start_catas_poll(struct mlx4_dev *dev);
 void mlx4_stop_catas_poll(struct mlx4_dev *dev);
-int mlx4_catas_init(void);
-void mlx4_catas_cleanup(void);
+void mlx4_catas_init(void);
 int mlx4_restart_one(struct pci_dev *pdev);
 int mlx4_register_device(struct mlx4_dev *dev);
 void mlx4_unregister_device(struct mlx4_dev *dev);
@@ -379,6 +391,17 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
 
 void mlx4_handle_catas_err(struct mlx4_dev *dev);
 
+void mlx4_do_sense_ports(struct mlx4_dev *dev,
+			 enum mlx4_port_type *stype,
+			 enum mlx4_port_type *defaults);
+void mlx4_start_sense(struct mlx4_dev *dev);
+void mlx4_stop_sense(struct mlx4_dev *dev);
+void mlx4_sense_init(struct mlx4_dev *dev);
+int mlx4_check_port_params(struct mlx4_dev *dev,
+			   enum mlx4_port_type *port_type);
+int mlx4_change_port_types(struct mlx4_dev *dev,
+			   enum mlx4_port_type *port_types);
+
 void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
 void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
 

+ 5 - 8
drivers/net/mlx4/port.c

@@ -298,20 +298,17 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
 {
 	struct mlx4_cmd_mailbox *mailbox;
 	int err;
-	u8 is_eth = dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
 
 	memset(mailbox->buf, 0, 256);
-	if (is_eth) {
-		((u8 *) mailbox->buf)[3] = 6;
-		((__be16 *) mailbox->buf)[4] = cpu_to_be16(1 << 15);
-		((__be16 *) mailbox->buf)[6] = cpu_to_be16(1 << 15);
-	} else
-		((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
-	err = mlx4_cmd(dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
+	if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
+		return 0;
+
+	((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
+	err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
 		       MLX4_CMD_TIME_CLASS_B);
 
 	mlx4_free_cmd_mailbox(dev, mailbox);

+ 156 - 0
drivers/net/mlx4/sense.c

@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/if_ether.h>
+
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4.h"
+
+static int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
+			   enum mlx4_port_type *type)
+{
+	u64 out_param;
+	int err = 0;
+
+	err = mlx4_cmd_imm(dev, 0, &out_param, port, 0,
+			   MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B);
+	if (err) {
+		mlx4_err(dev, "Sense command failed for port: %d\n", port);
+		return err;
+	}
+
+	if (out_param > 2) {
+		mlx4_err(dev, "Sense returned illegal value: 0x%llx\n", out_param);
+		return EINVAL;
+	}
+
+	*type = out_param;
+	return 0;
+}
+
+void mlx4_do_sense_ports(struct mlx4_dev *dev,
+			 enum mlx4_port_type *stype,
+			 enum mlx4_port_type *defaults)
+{
+	struct mlx4_sense *sense = &mlx4_priv(dev)->sense;
+	int err;
+	int i;
+
+	for (i = 1; i <= dev->caps.num_ports; i++) {
+		stype[i - 1] = 0;
+		if (sense->do_sense_port[i] && sense->sense_allowed[i] &&
+		    dev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
+			err = mlx4_SENSE_PORT(dev, i, &stype[i - 1]);
+			if (err)
+				stype[i - 1] = defaults[i - 1];
+		} else
+			stype[i - 1] = defaults[i - 1];
+	}
+
+	/*
+	 * Adjust port configuration:
+	 * If port 1 sensed nothing and port 2 is IB, set both as IB
+	 * If port 2 sensed nothing and port 1 is Eth, set both as Eth
+	 */
+	if (stype[0] == MLX4_PORT_TYPE_ETH) {
+		for (i = 1; i < dev->caps.num_ports; i++)
+			stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_ETH;
+	}
+	if (stype[dev->caps.num_ports - 1] == MLX4_PORT_TYPE_IB) {
+		for (i = 0; i < dev->caps.num_ports - 1; i++)
+			stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_IB;
+	}
+
+	/*
+	 * If sensed nothing, remain in current configuration.
+	 */
+	for (i = 0; i < dev->caps.num_ports; i++)
+		stype[i] = stype[i] ? stype[i] : defaults[i];
+
+}
+
+static void mlx4_sense_port(struct work_struct *work)
+{
+	struct delayed_work *delay = container_of(work, struct delayed_work, work);
+	struct mlx4_sense *sense = container_of(delay, struct mlx4_sense,
+						sense_poll);
+	struct mlx4_dev *dev = sense->dev;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	enum mlx4_port_type stype[MLX4_MAX_PORTS];
+
+	mutex_lock(&priv->port_mutex);
+	mlx4_do_sense_ports(dev, stype, &dev->caps.port_type[1]);
+
+	if (mlx4_check_port_params(dev, stype))
+		goto sense_again;
+
+	if (mlx4_change_port_types(dev, stype))
+		mlx4_err(dev, "Failed to change port_types\n");
+
+sense_again:
+	mutex_unlock(&priv->port_mutex);
+	queue_delayed_work(mlx4_wq , &sense->sense_poll,
+			   round_jiffies_relative(MLX4_SENSE_RANGE));
+}
+
+void mlx4_start_sense(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_sense *sense = &priv->sense;
+
+	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP))
+		return;
+
+	queue_delayed_work(mlx4_wq , &sense->sense_poll,
+			   round_jiffies_relative(MLX4_SENSE_RANGE));
+}
+
+void mlx4_stop_sense(struct mlx4_dev *dev)
+{
+	cancel_delayed_work_sync(&mlx4_priv(dev)->sense.sense_poll);
+}
+
+void  mlx4_sense_init(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_sense *sense = &priv->sense;
+	int port;
+
+	sense->dev = dev;
+	for (port = 1; port <= dev->caps.num_ports; port++)
+		sense->do_sense_port[port] = 1;
+
+	INIT_DELAYED_WORK_DEFERRABLE(&sense->sense_poll, mlx4_sense_port);
+}

+ 1 - 0
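The adjustment pass in mlx4_do_sense_ports() above resolves ports whose sensing returned nothing: Eth propagates forward from port 1, IB propagates backward from the last port, and anything still unresolved falls back to its current default. A standalone sketch of those three rules on example inputs (illustrative only, not driver code):

/*
 * The three adjustment rules from the new sense.c, applied to a
 * two-port example where port 2 sensed nothing.
 */
#include <stdio.h>

enum { TYPE_NONE = 0, TYPE_IB = 1, TYPE_ETH = 2 };

int main(void)
{
	int stype[2]    = { TYPE_ETH, TYPE_NONE };	/* port 2 sensed nothing */
	int defaults[2] = { TYPE_ETH, TYPE_IB };
	int i, n = 2;

	if (stype[0] == TYPE_ETH)			/* propagate Eth forward */
		for (i = 1; i < n; i++)
			stype[i] = stype[i] ? stype[i] : TYPE_ETH;
	if (stype[n - 1] == TYPE_IB)			/* propagate IB backward */
		for (i = 0; i < n - 1; i++)
			stype[i] = stype[i] ? stype[i] : TYPE_IB;
	for (i = 0; i < n; i++)				/* otherwise keep defaults */
		stype[i] = stype[i] ? stype[i] : defaults[i];

	printf("port1=%d port2=%d\n", stype[0], stype[1]);	/* both Eth */
	return 0;
}
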
include/linux/mlx4/cmd.h

@@ -55,6 +55,7 @@ enum {
 	MLX4_CMD_CLOSE_PORT	 = 0xa,
 	MLX4_CMD_CLOSE_PORT	 = 0xa,
 	MLX4_CMD_QUERY_HCA	 = 0xb,
 	MLX4_CMD_QUERY_PORT	 = 0x43,
+	MLX4_CMD_SENSE_PORT	 = 0x4d,
 	MLX4_CMD_SET_PORT	 = 0xc,
 	MLX4_CMD_ACCESS_DDR	 = 0x2e,
 	MLX4_CMD_MAP_ICM	 = 0xffa,
+ 4 - 2
include/linux/mlx4/device.h

@@ -155,8 +155,9 @@ enum mlx4_qp_region {
 };
 };
 
 enum mlx4_port_type {
-	MLX4_PORT_TYPE_IB	= 1 << 0,
-	MLX4_PORT_TYPE_ETH	= 1 << 1,
+	MLX4_PORT_TYPE_IB	= 1,
+	MLX4_PORT_TYPE_ETH	= 2,
+	MLX4_PORT_TYPE_AUTO	= 3
 };
 
 enum mlx4_special_vlan_idx {
 	enum mlx4_port_type	port_type[MLX4_MAX_PORTS + 1];
 	enum mlx4_port_type	port_type[MLX4_MAX_PORTS + 1];
 	u8			supported_type[MLX4_MAX_PORTS + 1];
 	u32			port_mask;
+	enum mlx4_port_type	possible_type[MLX4_MAX_PORTS + 1];
 };
 
 struct mlx4_buf_list {
+ 6 - 6
include/rdma/ib_cm.h

@@ -314,12 +314,12 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
  */
  */
 void ib_destroy_cm_id(struct ib_cm_id *cm_id);
 
-#define IB_SERVICE_ID_AGN_MASK	__constant_cpu_to_be64(0xFF00000000000000ULL)
-#define IB_CM_ASSIGN_SERVICE_ID __constant_cpu_to_be64(0x0200000000000000ULL)
-#define IB_CMA_SERVICE_ID	__constant_cpu_to_be64(0x0000000001000000ULL)
-#define IB_CMA_SERVICE_ID_MASK	__constant_cpu_to_be64(0xFFFFFFFFFF000000ULL)
-#define IB_SDP_SERVICE_ID	__constant_cpu_to_be64(0x0000000000010000ULL)
-#define IB_SDP_SERVICE_ID_MASK	__constant_cpu_to_be64(0xFFFFFFFFFFFF0000ULL)
+#define IB_SERVICE_ID_AGN_MASK	cpu_to_be64(0xFF00000000000000ULL)
+#define IB_CM_ASSIGN_SERVICE_ID	cpu_to_be64(0x0200000000000000ULL)
+#define IB_CMA_SERVICE_ID	cpu_to_be64(0x0000000001000000ULL)
+#define IB_CMA_SERVICE_ID_MASK	cpu_to_be64(0xFFFFFFFFFF000000ULL)
+#define IB_SDP_SERVICE_ID	cpu_to_be64(0x0000000000010000ULL)
+#define IB_SDP_SERVICE_ID_MASK	cpu_to_be64(0xFFFFFFFFFFFF0000ULL)
 
 struct ib_cm_compare_data {
 	u8  data[IB_CM_COMPARE_SIZE];
+ 2 - 2
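The ib_cm.h hunk above works because the kernel's cpu_to_be64() folds to a constant when its argument is a compile-time constant, so it is legal in static initializers and the __constant_ variants are redundant; the same reasoning applies to the cpu_to_be32()/cpu_to_be16() conversions in the headers below. A userspace sketch of the underlying idea, with a hand-rolled constant-foldable swab (illustrative, not the kernel implementation):

/*
 * A byte-swap macro built only from shifts and masks folds away at
 * compile time for constant arguments, so one macro serves both
 * constant initializers and runtime values.
 */
#include <stdint.h>
#include <stdio.h>

#define CONST_SWAB64(x) ((uint64_t)(				\
	(((uint64_t)(x) & 0x00000000000000ffULL) << 56) |	\
	(((uint64_t)(x) & 0x000000000000ff00ULL) << 40) |	\
	(((uint64_t)(x) & 0x0000000000ff0000ULL) << 24) |	\
	(((uint64_t)(x) & 0x00000000ff000000ULL) <<  8) |	\
	(((uint64_t)(x) & 0x000000ff00000000ULL) >>  8) |	\
	(((uint64_t)(x) & 0x0000ff0000000000ULL) >> 24) |	\
	(((uint64_t)(x) & 0x00ff000000000000ULL) >> 40) |	\
	(((uint64_t)(x) & 0xff00000000000000ULL) >> 56)))

/* usable in a static initializer because it folds at compile time */
static const uint64_t service_id_mask = CONST_SWAB64(0xFF00000000000000ULL);

int main(void)
{
	printf("0x%016llx\n", (unsigned long long)service_id_mask);
	return 0;
}
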
include/rdma/ib_mad.h

@@ -107,7 +107,7 @@
 #define	IB_MGMT_RMPP_STATUS_ABORT_MAX		127
 #define	IB_MGMT_RMPP_STATUS_ABORT_MAX		127
 
 #define IB_QP0		0
-#define IB_QP1		__constant_htonl(1)
+#define IB_QP1		cpu_to_be32(1)
 #define IB_QP1_QKEY	0x80010000
 #define IB_QP_SET_QKEY	0x80000000
 
  */
  */
 static inline void ib_set_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr, u8 flags)
 {
-	rmpp_hdr->rmpp_rtime_flags = (rmpp_hdr->rmpp_rtime_flags & 0xF1) |
+	rmpp_hdr->rmpp_rtime_flags = (rmpp_hdr->rmpp_rtime_flags & 0xF8) |
 				     (flags & 0x7);
 }
 
+ 17 - 17
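The ib_mad.h mask change above (0xF1 to 0xF8) follows from the rmpp_rtime_flags layout: the response-time value occupies bits 7:3 and the three RMPP flag bits occupy bits 2:0, so preserving the response time while replacing the flags means keeping exactly bits 7:3. With the old 0xF1 mask, the low response-time bit was cleared and a stale flag bit survived. A small demonstration (standalone sketch, not kernel code):

/*
 * Effect of the preserve mask when setting RMPP flags: reg starts with
 * resptime = 0x1F (bits 7:3) and flags = 0x7 (bits 2:0).
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t set_flags(uint8_t rtime_flags, uint8_t flags, uint8_t mask)
{
	return (rtime_flags & mask) | (flags & 0x7);
}

int main(void)
{
	uint8_t reg = 0xFF;

	/* 0xF1 clears resptime bit 3 and keeps a stale flag bit: 0xF1 */
	printf("buggy : 0x%02x\n", set_flags(reg, 0x0, 0xF1));
	/* 0xF8 keeps resptime intact and clears all flags:        0xF8 */
	printf("fixed : 0x%02x\n", set_flags(reg, 0x0, 0xF8));
	return 0;
}
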
include/rdma/ib_smi.h

@@ -63,25 +63,25 @@ struct ib_smp {
 	u8	return_path[IB_SMP_MAX_PATH_HOPS];
 	u8	return_path[IB_SMP_MAX_PATH_HOPS];
 } __attribute__ ((packed));
 
-#define IB_SMP_DIRECTION			__constant_htons(0x8000)
+#define IB_SMP_DIRECTION			cpu_to_be16(0x8000)
 
 /* Subnet management attributes */
-#define IB_SMP_ATTR_NOTICE			__constant_htons(0x0002)
-#define IB_SMP_ATTR_NODE_DESC			__constant_htons(0x0010)
-#define IB_SMP_ATTR_NODE_INFO			__constant_htons(0x0011)
-#define IB_SMP_ATTR_SWITCH_INFO			__constant_htons(0x0012)
-#define IB_SMP_ATTR_GUID_INFO			__constant_htons(0x0014)
-#define IB_SMP_ATTR_PORT_INFO			__constant_htons(0x0015)
-#define IB_SMP_ATTR_PKEY_TABLE			__constant_htons(0x0016)
-#define IB_SMP_ATTR_SL_TO_VL_TABLE		__constant_htons(0x0017)
-#define IB_SMP_ATTR_VL_ARB_TABLE		__constant_htons(0x0018)
-#define IB_SMP_ATTR_LINEAR_FORWARD_TABLE	__constant_htons(0x0019)
-#define IB_SMP_ATTR_RANDOM_FORWARD_TABLE	__constant_htons(0x001A)
-#define IB_SMP_ATTR_MCAST_FORWARD_TABLE		__constant_htons(0x001B)
-#define IB_SMP_ATTR_SM_INFO			__constant_htons(0x0020)
-#define IB_SMP_ATTR_VENDOR_DIAG			__constant_htons(0x0030)
-#define IB_SMP_ATTR_LED_INFO			__constant_htons(0x0031)
-#define IB_SMP_ATTR_VENDOR_MASK			__constant_htons(0xFF00)
+#define IB_SMP_ATTR_NOTICE			cpu_to_be16(0x0002)
+#define IB_SMP_ATTR_NODE_DESC			cpu_to_be16(0x0010)
+#define IB_SMP_ATTR_NODE_INFO			cpu_to_be16(0x0011)
+#define IB_SMP_ATTR_SWITCH_INFO			cpu_to_be16(0x0012)
+#define IB_SMP_ATTR_GUID_INFO			cpu_to_be16(0x0014)
+#define IB_SMP_ATTR_PORT_INFO			cpu_to_be16(0x0015)
+#define IB_SMP_ATTR_PKEY_TABLE			cpu_to_be16(0x0016)
+#define IB_SMP_ATTR_SL_TO_VL_TABLE		cpu_to_be16(0x0017)
+#define IB_SMP_ATTR_VL_ARB_TABLE		cpu_to_be16(0x0018)
+#define IB_SMP_ATTR_LINEAR_FORWARD_TABLE	cpu_to_be16(0x0019)
+#define IB_SMP_ATTR_RANDOM_FORWARD_TABLE	cpu_to_be16(0x001A)
+#define IB_SMP_ATTR_MCAST_FORWARD_TABLE		cpu_to_be16(0x001B)
+#define IB_SMP_ATTR_SM_INFO			cpu_to_be16(0x0020)
+#define IB_SMP_ATTR_VENDOR_DIAG			cpu_to_be16(0x0030)
+#define IB_SMP_ATTR_LED_INFO			cpu_to_be16(0x0031)
+#define IB_SMP_ATTR_VENDOR_MASK			cpu_to_be16(0xFF00)
 
 struct ib_port_info {
 	__be64 mkey;

Not all files are shown because too many files changed in this diff