Skip to content

Commit 04c349a

Browse files
Maor Gottlieb authored and jgunthorpe committed
RDMA/mad: Remove snoop interface
Snoop interface is not used. Remove it.

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Maor Gottlieb <[email protected]>
Signed-off-by: Leon Romanovsky <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
1 parent f86e343 commit 04c349a

File tree

2 files changed

+6
-281
lines changed

2 files changed

+6
-281
lines changed

drivers/infiniband/core/mad.c

Lines changed: 5 additions & 233 deletions
Original file line numberDiff line numberDiff line change
@@ -85,7 +85,6 @@ MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests
8585
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
8686
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
8787

88-
/* Client ID 0 is used for snoop-only clients */
8988
static DEFINE_XARRAY_ALLOC1(ib_mad_clients);
9089
static u32 ib_mad_client_next;
9190
static struct list_head ib_mad_port_list;
@@ -483,141 +482,12 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
483482
}
484483
EXPORT_SYMBOL(ib_register_mad_agent);
485484

486-
static inline int is_snooping_sends(int mad_snoop_flags)
487-
{
488-
return (mad_snoop_flags &
489-
(/*IB_MAD_SNOOP_POSTED_SENDS |
490-
IB_MAD_SNOOP_RMPP_SENDS |*/
491-
IB_MAD_SNOOP_SEND_COMPLETIONS /*|
492-
IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
493-
}
494-
495-
static inline int is_snooping_recvs(int mad_snoop_flags)
496-
{
497-
return (mad_snoop_flags &
498-
(IB_MAD_SNOOP_RECVS /*|
499-
IB_MAD_SNOOP_RMPP_RECVS*/));
500-
}
501-
502-
static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
503-
struct ib_mad_snoop_private *mad_snoop_priv)
504-
{
505-
struct ib_mad_snoop_private **new_snoop_table;
506-
unsigned long flags;
507-
int i;
508-
509-
spin_lock_irqsave(&qp_info->snoop_lock, flags);
510-
/* Check for empty slot in array. */
511-
for (i = 0; i < qp_info->snoop_table_size; i++)
512-
if (!qp_info->snoop_table[i])
513-
break;
514-
515-
if (i == qp_info->snoop_table_size) {
516-
/* Grow table. */
517-
new_snoop_table = krealloc(qp_info->snoop_table,
518-
sizeof mad_snoop_priv *
519-
(qp_info->snoop_table_size + 1),
520-
GFP_ATOMIC);
521-
if (!new_snoop_table) {
522-
i = -ENOMEM;
523-
goto out;
524-
}
525-
526-
qp_info->snoop_table = new_snoop_table;
527-
qp_info->snoop_table_size++;
528-
}
529-
qp_info->snoop_table[i] = mad_snoop_priv;
530-
atomic_inc(&qp_info->snoop_count);
531-
out:
532-
spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
533-
return i;
534-
}
535-
536-
struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
537-
u8 port_num,
538-
enum ib_qp_type qp_type,
539-
int mad_snoop_flags,
540-
ib_mad_snoop_handler snoop_handler,
541-
ib_mad_recv_handler recv_handler,
542-
void *context)
543-
{
544-
struct ib_mad_port_private *port_priv;
545-
struct ib_mad_agent *ret;
546-
struct ib_mad_snoop_private *mad_snoop_priv;
547-
int qpn;
548-
int err;
549-
550-
/* Validate parameters */
551-
if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
552-
(is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
553-
ret = ERR_PTR(-EINVAL);
554-
goto error1;
555-
}
556-
qpn = get_spl_qp_index(qp_type);
557-
if (qpn == -1) {
558-
ret = ERR_PTR(-EINVAL);
559-
goto error1;
560-
}
561-
port_priv = ib_get_mad_port(device, port_num);
562-
if (!port_priv) {
563-
ret = ERR_PTR(-ENODEV);
564-
goto error1;
565-
}
566-
/* Allocate structures */
567-
mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
568-
if (!mad_snoop_priv) {
569-
ret = ERR_PTR(-ENOMEM);
570-
goto error1;
571-
}
572-
573-
/* Now, fill in the various structures */
574-
mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
575-
mad_snoop_priv->agent.device = device;
576-
mad_snoop_priv->agent.recv_handler = recv_handler;
577-
mad_snoop_priv->agent.snoop_handler = snoop_handler;
578-
mad_snoop_priv->agent.context = context;
579-
mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
580-
mad_snoop_priv->agent.port_num = port_num;
581-
mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
582-
init_completion(&mad_snoop_priv->comp);
583-
584-
err = ib_mad_agent_security_setup(&mad_snoop_priv->agent, qp_type);
585-
if (err) {
586-
ret = ERR_PTR(err);
587-
goto error2;
588-
}
589-
590-
mad_snoop_priv->snoop_index = register_snoop_agent(
591-
&port_priv->qp_info[qpn],
592-
mad_snoop_priv);
593-
if (mad_snoop_priv->snoop_index < 0) {
594-
ret = ERR_PTR(mad_snoop_priv->snoop_index);
595-
goto error3;
596-
}
597-
598-
atomic_set(&mad_snoop_priv->refcount, 1);
599-
return &mad_snoop_priv->agent;
600-
error3:
601-
ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
602-
error2:
603-
kfree(mad_snoop_priv);
604-
error1:
605-
return ret;
606-
}
607-
EXPORT_SYMBOL(ib_register_mad_snoop);
608-
609485
static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
610486
{
611487
if (atomic_dec_and_test(&mad_agent_priv->refcount))
612488
complete(&mad_agent_priv->comp);
613489
}
614490

615-
static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
616-
{
617-
if (atomic_dec_and_test(&mad_snoop_priv->refcount))
618-
complete(&mad_snoop_priv->comp);
619-
}
620-
621491
static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
622492
{
623493
struct ib_mad_port_private *port_priv;
@@ -650,25 +520,6 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
650520
kfree_rcu(mad_agent_priv, rcu);
651521
}
652522

653-
static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
654-
{
655-
struct ib_mad_qp_info *qp_info;
656-
unsigned long flags;
657-
658-
qp_info = mad_snoop_priv->qp_info;
659-
spin_lock_irqsave(&qp_info->snoop_lock, flags);
660-
qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
661-
atomic_dec(&qp_info->snoop_count);
662-
spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
663-
664-
deref_snoop_agent(mad_snoop_priv);
665-
wait_for_completion(&mad_snoop_priv->comp);
666-
667-
ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
668-
669-
kfree(mad_snoop_priv);
670-
}
671-
672523
/*
673524
* ib_unregister_mad_agent - Unregisters a client from using MAD services
674525
*
@@ -677,20 +528,11 @@ static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
677528
void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
678529
{
679530
struct ib_mad_agent_private *mad_agent_priv;
680-
struct ib_mad_snoop_private *mad_snoop_priv;
681-
682-
/* If the TID is zero, the agent can only snoop. */
683-
if (mad_agent->hi_tid) {
684-
mad_agent_priv = container_of(mad_agent,
685-
struct ib_mad_agent_private,
686-
agent);
687-
unregister_mad_agent(mad_agent_priv);
688-
} else {
689-
mad_snoop_priv = container_of(mad_agent,
690-
struct ib_mad_snoop_private,
691-
agent);
692-
unregister_mad_snoop(mad_snoop_priv);
693-
}
531+
532+
mad_agent_priv = container_of(mad_agent,
533+
struct ib_mad_agent_private,
534+
agent);
535+
unregister_mad_agent(mad_agent_priv);
694536
}
695537
EXPORT_SYMBOL(ib_unregister_mad_agent);
696538

@@ -706,57 +548,6 @@ static void dequeue_mad(struct ib_mad_list_head *mad_list)
706548
spin_unlock_irqrestore(&mad_queue->lock, flags);
707549
}
708550

709-
static void snoop_send(struct ib_mad_qp_info *qp_info,
710-
struct ib_mad_send_buf *send_buf,
711-
struct ib_mad_send_wc *mad_send_wc,
712-
int mad_snoop_flags)
713-
{
714-
struct ib_mad_snoop_private *mad_snoop_priv;
715-
unsigned long flags;
716-
int i;
717-
718-
spin_lock_irqsave(&qp_info->snoop_lock, flags);
719-
for (i = 0; i < qp_info->snoop_table_size; i++) {
720-
mad_snoop_priv = qp_info->snoop_table[i];
721-
if (!mad_snoop_priv ||
722-
!(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
723-
continue;
724-
725-
atomic_inc(&mad_snoop_priv->refcount);
726-
spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
727-
mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
728-
send_buf, mad_send_wc);
729-
deref_snoop_agent(mad_snoop_priv);
730-
spin_lock_irqsave(&qp_info->snoop_lock, flags);
731-
}
732-
spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
733-
}
734-
735-
static void snoop_recv(struct ib_mad_qp_info *qp_info,
736-
struct ib_mad_recv_wc *mad_recv_wc,
737-
int mad_snoop_flags)
738-
{
739-
struct ib_mad_snoop_private *mad_snoop_priv;
740-
unsigned long flags;
741-
int i;
742-
743-
spin_lock_irqsave(&qp_info->snoop_lock, flags);
744-
for (i = 0; i < qp_info->snoop_table_size; i++) {
745-
mad_snoop_priv = qp_info->snoop_table[i];
746-
if (!mad_snoop_priv ||
747-
!(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
748-
continue;
749-
750-
atomic_inc(&mad_snoop_priv->refcount);
751-
spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
752-
mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
753-
mad_recv_wc);
754-
deref_snoop_agent(mad_snoop_priv);
755-
spin_lock_irqsave(&qp_info->snoop_lock, flags);
756-
}
757-
spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
758-
}
759-
760551
static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
761552
u16 pkey_index, u8 port_num, struct ib_wc *wc)
762553
{
@@ -2289,9 +2080,6 @@ static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
22892080
recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
22902081
recv->header.recv_wc.recv_buf.grh = &recv->grh;
22912082

2292-
if (atomic_read(&qp_info->snoop_count))
2293-
snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
2294-
22952083
/* Validate MAD */
22962084
if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
22972085
goto out;
@@ -2538,9 +2326,6 @@ static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
25382326
mad_send_wc.send_buf = &mad_send_wr->send_buf;
25392327
mad_send_wc.status = wc->status;
25402328
mad_send_wc.vendor_err = wc->vendor_err;
2541-
if (atomic_read(&qp_info->snoop_count))
2542-
snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
2543-
IB_MAD_SNOOP_SEND_COMPLETIONS);
25442329
ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
25452330

25462331
if (queued_send_wr) {
@@ -2782,10 +2567,6 @@ static void local_completions(struct work_struct *work)
27822567
local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
27832568
local->mad_priv->header.recv_wc.recv_buf.mad =
27842569
(struct ib_mad *)local->mad_priv->mad;
2785-
if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2786-
snoop_recv(recv_mad_agent->qp_info,
2787-
&local->mad_priv->header.recv_wc,
2788-
IB_MAD_SNOOP_RECVS);
27892570
recv_mad_agent->agent.recv_handler(
27902571
&recv_mad_agent->agent,
27912572
&local->mad_send_wr->send_buf,
@@ -2800,10 +2581,6 @@ static void local_completions(struct work_struct *work)
28002581
mad_send_wc.status = IB_WC_SUCCESS;
28012582
mad_send_wc.vendor_err = 0;
28022583
mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
2803-
if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2804-
snoop_send(mad_agent_priv->qp_info,
2805-
&local->mad_send_wr->send_buf,
2806-
&mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
28072584
mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
28082585
&mad_send_wc);
28092586

@@ -3119,10 +2896,6 @@ static void init_mad_qp(struct ib_mad_port_private *port_priv,
31192896
init_mad_queue(qp_info, &qp_info->send_queue);
31202897
init_mad_queue(qp_info, &qp_info->recv_queue);
31212898
INIT_LIST_HEAD(&qp_info->overflow_list);
3122-
spin_lock_init(&qp_info->snoop_lock);
3123-
qp_info->snoop_table = NULL;
3124-
qp_info->snoop_table_size = 0;
3125-
atomic_set(&qp_info->snoop_count, 0);
31262899
}
31272900

31282901
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
@@ -3166,7 +2939,6 @@ static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
31662939
return;
31672940

31682941
ib_destroy_qp(qp_info->qp);
3169-
kfree(qp_info->snoop_table);
31702942
}
31712943

31722944
/*

0 commit comments

Comments (0)