Lines matching defs:priv — local definitions of the priv pointer (struct mlx4_priv) in the mlx4 event-queue code, with the nearby lines that use them

122 struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
123 struct mlx4_dev *dev = &priv->dev;
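
Line 122 recovers the enclosing mlx4_priv from a pointer to its embedded mfunc member, then line 123 reaches the device struct through it. Below is a minimal userspace sketch of that container_of idiom; the struct names are simplified stand-ins, not the real mlx4 definitions.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct mfunc { int comm_toggle; };
struct priv  { int dev; struct mfunc mfunc; };

int main(void)
{
        struct priv p = { .dev = 42 };
        struct mfunc *m = &p.mfunc;   /* only the member pointer is passed around */
        struct priv *back = container_of(m, struct priv, mfunc);
        printf("dev = %d\n", back->dev);   /* prints 42: we got the parent back */
        return 0;
}

Because container_of is pure pointer arithmetic over offsetof, it works for any struct that embeds the member by value, which is why the driver can hand work items only the mfunc pointer.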
155 struct mlx4_priv *priv = mlx4_priv(dev);
156 struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
174 queue_work(priv->mfunc.master.comm_wq,
175 &priv->mfunc.master.slave_event_work);
181 struct mlx4_priv *priv = mlx4_priv(dev);
183 &priv->mfunc.master.slave_state[slave];
200 struct mlx4_priv *priv =
202 struct mlx4_dev *dev = &priv->dev;
203 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
217 spin_lock(&priv->mfunc.master.slave_state_lock);
220 spin_unlock(&priv->mfunc.master.slave_state_lock);
233 struct mlx4_priv *priv = mlx4_priv(dev);
412 memcpy(&priv->mfunc.master.comm_arm_bit_vector,
415 queue_work(priv->mfunc.master.comm_wq,
416 &priv->mfunc.master.comm_work);
437 spin_lock(&priv->mfunc.master.slave_state_lock);
439 priv->mfunc.master.slave_state[flr_slave].active = false;
440 priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
441 priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
443 spin_unlock(&priv->mfunc.master.slave_state_lock);
444 queue_work(priv->mfunc.master.comm_wq,
445 &priv->mfunc.master.slave_flr_event_work);
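
Lines 437-445 show the FLR ordering: the slave's state is updated under slave_state_lock first, and only then is the FLR work queued. A minimal sketch of that publish-under-lock-then-notify pattern, with a pthread mutex standing in for the spinlock and a print standing in for queue_work():

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the mlx4 master state. */
struct slave_state { bool active; bool going_down; };

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static struct slave_state slaves[4];

static void queue_flr_work(int slave)   /* stands in for queue_work() */
{
        printf("FLR work queued for slave %d\n", slave);
}

static void handle_flr(int slave)
{
        /* Publish the state change under the lock first ... */
        pthread_mutex_lock(&state_lock);
        slaves[slave].active = false;
        slaves[slave].going_down = true;
        pthread_mutex_unlock(&state_lock);
        /* ... then wake the worker, so it never sees a half-updated slave. */
        queue_flr_work(slave);
}

int main(void) { handle_flr(2); return 0; }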
515 struct mlx4_priv *priv = mlx4_priv(dev);
519 writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);
522 work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);
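
Lines 515-522 are the shape of the INTx interrupt handler: acknowledge by writing the clear mask to the mapped clear register, then poll every EQ and report whether any had work. A self-contained sketch of that shape, where poll_eq() and the fake register are illustrative stand-ins for mlx4_eq_int() and the iomapped clr_int:

#include <stdbool.h>
#include <stdio.h>

#define NUM_EQS 4

static unsigned int fake_clr_reg;   /* stands in for the mapped register */
static volatile unsigned int *clr_int = &fake_clr_reg;
static unsigned int clr_mask = 0x00020000;   /* example mask */

static bool poll_eq(int i) { return i == 0; }   /* stand-in for mlx4_eq_int() */

static int interrupt_handler(void)
{
        bool work = false;

        *clr_int = clr_mask;   /* ack first: writel(clr_mask, clr_int) */
        for (int i = 0; i < NUM_EQS; ++i)
                work |= poll_eq(i);   /* then drain every EQ */

        return work;   /* nonzero maps to IRQ_HANDLED */
}

int main(void)
{
        printf("handled=%d\n", interrupt_handler());
        return 0;
}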
544 struct mlx4_priv *priv = mlx4_priv(dev);
546 priv->mfunc.master.slave_state[slave].event_eq;
602 struct mlx4_priv *priv = mlx4_priv(dev);
607 if (!priv->eq_table.uar_map[index]) {
608 priv->eq_table.uar_map[index] =
612 if (!priv->eq_table.uar_map[index]) {
619 return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
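
Lines 607-619 lazily ioremap a UAR page on first use and compute the EQ doorbell address within it: per the math at line 619, each page holds the doorbells for four EQs, 8 bytes apart, starting at offset 0x800. A sketch of just the address arithmetic (the layout here mirrors the listing, not a verified hardware spec):

#include <stdint.h>
#include <stdio.h>

/* The uar_map[] index would be eqn / 4; the listing shows only the lookup. */
static uintptr_t eq_doorbell(uintptr_t uar_page, unsigned int eqn)
{
        return uar_page + 0x800 + 8 * (eqn % 4);
}

int main(void)
{
        for (unsigned int eqn = 0; eqn < 8; ++eqn)
                printf("eqn %u -> page %u, offset 0x%x\n", eqn, eqn / 4,
                       (unsigned int)eq_doorbell(0, eqn));
        return 0;
}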
625 struct mlx4_priv *priv = mlx4_priv(dev);
668 eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
714 mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
736 struct mlx4_priv *priv = mlx4_priv(dev);
768 mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
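
Lines 668, 714, and 768 show the EQN lifetime: a number is allocated from the bitmap at create time and returned both on the create error path and at destroy. A toy allocator that mimics the mlx4_bitmap_alloc/mlx4_bitmap_free pattern for illustration only:

#include <stdint.h>
#include <stdio.h>

static uint64_t eqn_bitmap;   /* one bit per EQ number */

static int bitmap_alloc(void)
{
        for (int i = 0; i < 64; ++i)
                if (!(eqn_bitmap & (1ULL << i))) {
                        eqn_bitmap |= 1ULL << i;
                        return i;
                }
        return -1;   /* pool exhausted */
}

static void bitmap_free(int n)
{
        eqn_bitmap &= ~(1ULL << n);
}

int main(void)
{
        int eqn = bitmap_alloc();
        printf("got eqn %d\n", eqn);
        bitmap_free(eqn);   /* error path and destroy both return the number */
        return 0;
}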
775 struct mlx4_priv *priv = mlx4_priv(dev);
792 if (priv->msix_ctl.pool_bm & 1ULL << i) {
795 free_irq(priv->eq_table.eq[vec].irq,
796 &priv->eq_table.eq[vec]);
806 struct mlx4_priv *priv = mlx4_priv(dev);
808 priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
809 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
810 if (!priv->clr_base) {
820 struct mlx4_priv *priv = mlx4_priv(dev);
822 iounmap(priv->clr_base);
827 struct mlx4_priv *priv = mlx4_priv(dev);
829 priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
830 sizeof *priv->eq_table.eq, GFP_KERNEL);
831 if (!priv->eq_table.eq)
844 struct mlx4_priv *priv = mlx4_priv(dev);
848 priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
849 sizeof *priv->eq_table.uar_map,
851 if (!priv->eq_table.uar_map) {
856 err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
862 priv->eq_table.uar_map[i] = NULL;
869 priv->eq_table.clr_mask =
870 swab32(1 << (priv->eq_table.inta_pin & 31));
871 priv->eq_table.clr_int = priv->clr_base +
872 (priv->eq_table.inta_pin < 32 ? 4 : 0);
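
Lines 869-872 build the INTA clear mask: the 64-bit clear register is treated as two 32-bit words, the pin number selects the word (pins 0-31 at byte offset 4 per the listing) and the bit within it, and the mask is byte-swapped for the device's endianness. A sketch of that arithmetic, with an example pin chosen purely for illustration:

#include <stdint.h>
#include <stdio.h>

static uint32_t swab32(uint32_t x)
{
        return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
               ((x & 0x00ff0000u) >> 8)  | ((x & 0xff000000u) >> 24);
}

int main(void)
{
        unsigned int inta_pin = 9;                        /* example pin */
        uint32_t clr_mask = swab32(1u << (inta_pin & 31));
        unsigned int clr_off = inta_pin < 32 ? 4 : 0;     /* which 32-bit half */
        printf("mask=0x%08x at clr_base+%u\n", clr_mask, clr_off);
        return 0;
}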
875 priv->eq_table.irq_names =
879 if (!priv->eq_table.irq_names) {
889 &priv->eq_table.eq[i]);
898 &priv->eq_table.eq[dev->caps.num_comp_vectors]);
910 &priv->eq_table.eq[i]);
923 snprintf(priv->eq_table.irq_names +
929 snprintf(priv->eq_table.irq_names +
936 eq_name = priv->eq_table.irq_names +
938 err = request_irq(priv->eq_table.eq[i].irq,
940 priv->eq_table.eq + i);
944 priv->eq_table.eq[i].have_irq = 1;
947 snprintf(priv->eq_table.irq_names,
952 IRQF_SHARED, priv->eq_table.irq_names, dev);
956 priv->eq_table.have_irq = 1;
960 priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
963 priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);
966 eq_set_ci(&priv->eq_table.eq[i], 1);
971 mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);
978 mlx4_free_eq(dev, &priv->eq_table.eq[i]);
986 mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
989 kfree(priv->eq_table.uar_map);
996 struct mlx4_priv *priv = mlx4_priv(dev);
1000 priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
1005 mlx4_free_eq(dev, &priv->eq_table.eq[i]);
1011 if (priv->eq_table.uar_map[i])
1012 iounmap(priv->eq_table.uar_map[i]);
1014 mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
1016 kfree(priv->eq_table.uar_map);
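
Lines 996-1016 tear down in reverse order of setup: free each EQ, unmap only the UAR pages that were actually (lazily) mapped, clean up the EQN bitmap, then free the table arrays. A small sketch of the skip-unmapped-slots part, with malloc/free standing in for ioremap/iounmap:

#include <stdlib.h>
#include <stdio.h>

#define NUM_UARS 4

static void *uar_map[NUM_UARS];

int main(void)
{
        /* setup: only some pages get "mapped", lazily, as in the driver */
        uar_map[0] = malloc(4096);
        uar_map[2] = malloc(4096);

        /* teardown mirrors lines 1011-1012: skip slots never mapped */
        for (int i = 0; i < NUM_UARS; ++i)
                if (uar_map[i])
                        free(uar_map[i]);   /* iounmap() in the real code */
        printf("released all mapped UAR pages\n");
        return 0;
}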
1025 struct mlx4_priv *priv = mlx4_priv(dev);
1044 priv->eq_table.eq[i].eqn);
1058 priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
1066 struct mlx4_priv *priv = mlx4_priv(dev);
1069 mutex_lock(&priv->msix_ctl.pool_lock);
1071 if (~priv->msix_ctl.pool_bm & 1ULL << i) {
1072 priv->msix_ctl.pool_bm |= 1ULL << i;
1074 snprintf(priv->eq_table.irq_names +
1077 err = request_irq(priv->eq_table.eq[vec].irq,
1079 &priv->eq_table.irq_names[vec<<5],
1080 priv->eq_table.eq + vec);
1083 priv->msix_ctl.pool_bm ^= 1ULL << i;
1088 eq_set_ci(&priv->eq_table.eq[vec], 1);
1091 mutex_unlock(&priv->msix_ctl.pool_lock);
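
Lines 1069-1091 implement the MSI-X vector pool: under the pool mutex, find a clear bit in the 64-bit pool bitmap, claim it, and try to attach an IRQ; on failure, release the bit and move on. The bitmap is 64 bits wide, so every mask must be built with 1ULL << i, or vectors above 31 would be silently lost. A sketch of the pattern, where try_request_irq() is an illustrative stand-in for request_irq():

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t pool_bm;

static int try_request_irq(int vec) { (void)vec; return 0; }   /* 0 = success */

static int assign_vec(void)
{
        int vec = -1;

        pthread_mutex_lock(&pool_lock);
        for (int i = 0; vec < 0 && i < 64; ++i) {
                if (~pool_bm & (1ULL << i)) {                /* free slot? */
                        pool_bm |= 1ULL << i;                /* claim it */
                        if (try_request_irq(i)) {
                                pool_bm &= ~(1ULL << i);     /* undo, try next */
                                continue;
                        }
                        vec = i;
                }
        }
        pthread_mutex_unlock(&pool_lock);
        return vec;
}

int main(void)
{
        printf("assigned vec %d\n", assign_vec());
        return 0;
}

The release path at lines 1112-1118 is the mirror image: under the same mutex, free the IRQ and clear the claimed bit.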
1105 struct mlx4_priv *priv = mlx4_priv(dev);
1112 mutex_lock(&priv->msix_ctl.pool_lock);
1113 if (priv->msix_ctl.pool_bm & 1ULL << i) {
1114 free_irq(priv->eq_table.eq[vec].irq,
1115 &priv->eq_table.eq[vec]);
1116 priv->msix_ctl.pool_bm &= ~(1ULL << i);
1118 mutex_unlock(&priv->msix_ctl.pool_lock);