| author | Linus Torvalds <torvalds@linux-foundation.org> | 2011-07-22 14:50:12 -0700 | 
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-07-22 14:50:12 -0700 | 
| commit | ece236ce2fad9c27a6fd2530f899289025194bce (patch) | |
| tree | 474b793205872206a2a3f7d409ff9b1f81f3a9a8 /drivers/infiniband/hw/mlx4/mad.c | |
| parent | 441c196e84b11aad3123baa9320eee7abc6b5c98 (diff) | |
| parent | 4460207561290c3be7e6c7538f22690028170c1d (diff) | |
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (26 commits)
  IB/qib: Defer HCA error events to tasklet
  mlx4_core: Bump the driver version to 1.0
  RDMA/cxgb4: Use printk_ratelimited() instead of printk_ratelimit()
  IB/mlx4: Support PMA counters for IBoE
  IB/mlx4: Use flow counters on IBoE ports
  IB/pma: Add include file for IBA performance counters definitions
  mlx4_core: Add network flow counters
  mlx4_core: Fix location of counter index in QP context struct
  mlx4_core: Read extended capabilities into the flags field
  mlx4_core: Extend capability flags to 64 bits
  IB/mlx4: Generate GID change events in IBoE code
  IB/core: Add GID change event
  RDMA/cma: Don't allow IPoIB port space for IBoE
  RDMA: Allow for NULL .modify_device() and .modify_port() methods
  IB/qib: Update active link width
  IB/qib: Fix potential deadlock with link down interrupt
  IB/qib: Add sysfs interface to read free contexts
  IB/mthca: Remove unnecessary read of PCI_CAP_ID_EXP
  IB/qib: Remove double define
  IB/qib: Remove unnecessary read of PCI_CAP_ID_EXP
  ...
Diffstat (limited to 'drivers/infiniband/hw/mlx4/mad.c')
| -rw-r--r-- | drivers/infiniband/hw/mlx4/mad.c | 68 | 
1 file changed, 67 insertions, 1 deletion
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 57ffa50f509..f36da994a85 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -35,6 +35,7 @@
 
 #include <linux/mlx4/cmd.h>
 #include <linux/gfp.h>
+#include <rdma/ib_pma.h>
 
 #include "mlx4_ib.h"
@@ -232,7 +233,7 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma
 	}
 }
 
-int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags,	u8 port_num,
+static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 			struct ib_wc *in_wc, struct ib_grh *in_grh,
 			struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
@@ -302,6 +303,71 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags,	u8 port_num,
 	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
 }
 
+static void edit_counter(struct mlx4_counter *cnt,
+					struct ib_pma_portcounters *pma_cnt)
+{
+	pma_cnt->port_xmit_data = cpu_to_be32((be64_to_cpu(cnt->tx_bytes)>>2));
+	pma_cnt->port_rcv_data  = cpu_to_be32((be64_to_cpu(cnt->rx_bytes)>>2));
+	pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames));
+	pma_cnt->port_rcv_packets  = cpu_to_be32(be64_to_cpu(cnt->rx_frames));
+}
+
+static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+			struct ib_wc *in_wc, struct ib_grh *in_grh,
+			struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_ib_dev *dev = to_mdev(ibdev);
+	int err;
+	u32 inmod = dev->counters[port_num - 1] & 0xffff;
+	u8 mode;
+
+	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
+		return -EINVAL;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
+	if (IS_ERR(mailbox))
+		return IB_MAD_RESULT_FAILURE;
+
+	err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
+			   MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C);
+	if (err)
+		err = IB_MAD_RESULT_FAILURE;
+	else {
+		memset(out_mad->data, 0, sizeof out_mad->data);
+		mode = ((struct mlx4_counter *)mailbox->buf)->counter_mode;
+		switch (mode & 0xf) {
+		case 0:
+			edit_counter(mailbox->buf,
+						(void *)(out_mad->data + 40));
+			err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+			break;
+		default:
+			err = IB_MAD_RESULT_FAILURE;
+		}
+	}
+
+	mlx4_free_cmd_mailbox(dev->dev, mailbox);
+
+	return err;
+}
+
+int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+			struct ib_wc *in_wc, struct ib_grh *in_grh,
+			struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+	switch (rdma_port_get_link_layer(ibdev, port_num)) {
+	case IB_LINK_LAYER_INFINIBAND:
+		return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
+				      in_grh, in_mad, out_mad);
+	case IB_LINK_LAYER_ETHERNET:
+		return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
+					  in_grh, in_mad, out_mad);
+	default:
+		return -EINVAL;
+	}
+}
+
 static void send_handler(struct ib_mad_agent *agent,
 			 struct ib_mad_send_wc *mad_send_wc)
 {
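A note on the conversion done by the new edit_counter() helper above: the IBA PortCounters attribute reports PortXmitData and PortRcvData in units of 32-bit words (octets divided by 4), which is why the raw mlx4 byte counters are shifted right by 2 before being stored big-endian in the MAD payload. The sketch below is a minimal user-space illustration of just that conversion, not kernel code; the demo_* struct and function names are invented for this example, htonl() stands in for the kernel's cpu_to_be32(), and the real field layouts live in <rdma/ib_pma.h> and the mlx4 counter structure.

```c
/*
 * Illustrative sketch only -- not part of the kernel patch above.
 * It mimics what edit_counter() does: convert raw byte/frame counters
 * into the PMA PortCounters convention, where the data counters are
 * reported in 32-bit words (bytes / 4) and stored big-endian.
 * The demo_* types are hypothetical stand-ins for the real structures.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>		/* htonl()/ntohl() stand in for cpu_to_be32() */

struct demo_hw_counter {	/* raw counters, host byte order for simplicity */
	uint64_t tx_bytes;
	uint64_t rx_bytes;
	uint64_t tx_frames;
	uint64_t rx_frames;
};

struct demo_pma_portcounters {	/* the four PMA fields touched by the patch */
	uint32_t port_xmit_data;	/* 32-bit words, big-endian */
	uint32_t port_rcv_data;		/* 32-bit words, big-endian */
	uint32_t port_xmit_packets;
	uint32_t port_rcv_packets;
};

static void demo_edit_counter(const struct demo_hw_counter *cnt,
			      struct demo_pma_portcounters *pma)
{
	pma->port_xmit_data    = htonl((uint32_t)(cnt->tx_bytes >> 2));	/* bytes -> words */
	pma->port_rcv_data     = htonl((uint32_t)(cnt->rx_bytes >> 2));	/* bytes -> words */
	pma->port_xmit_packets = htonl((uint32_t)cnt->tx_frames);
	pma->port_rcv_packets  = htonl((uint32_t)cnt->rx_frames);
}

int main(void)
{
	struct demo_hw_counter hw = {
		.tx_bytes = 4096, .rx_bytes = 8192,
		.tx_frames = 4,   .rx_frames = 8,
	};
	struct demo_pma_portcounters pma;

	demo_edit_counter(&hw, &pma);
	/* 4096 bytes -> 1024 words, 8192 bytes -> 2048 words */
	printf("xmit_data=%u words, rcv_data=%u words\n",
	       (unsigned)ntohl(pma.port_xmit_data),
	       (unsigned)ntohl(pma.port_rcv_data));
	return 0;
}
```

The classic PortCounters fields are only 32 bits wide, so the 64-bit hardware values are truncated on the way in, just as the kernel helper's cpu_to_be32(be64_to_cpu(...)) does.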