@@ -4,6 +4,7 @@
  * Copyright (c) 2004 Intel Corporation.  All rights reserved.
  * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
  * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -41,7 +42,10 @@
 
 #include <linux/types.h>
 #include <linux/device.h>
+
 #include <asm/atomic.h>
+#include <asm/scatterlist.h>
+#include <asm/uaccess.h>
 
 union ib_gid {
 	u8	raw[16];
@@ -544,7 +548,7 @@ struct ib_send_wr {
 	int			num_sge;
 	enum ib_wr_opcode	opcode;
 	int			send_flags;
-	u32			imm_data;
+	__be32			imm_data;
 	union {
 		struct {
 			u64	remote_addr;
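Retyping imm_data from u32 to __be32 is an annotation fix: immediate data travels on the wire in network byte order, and the __be32 type lets sparse flag missing conversions at the call sites. A minimal illustration of the explicit conversion consumers now need — set_imm is a made-up helper, not part of this patch:

static void set_imm(struct ib_send_wr *wr, u32 host_value)
{
	wr->opcode   = IB_WR_SEND_WITH_IMM;
	/* imm_data is big-endian on the wire; convert explicitly */
	wr->imm_data = cpu_to_be32(host_value);
}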
@@ -618,29 +622,86 @@ struct ib_fmr_attr {
 	u8	page_size;
 };
 
+struct ib_ucontext {
+	struct ib_device       *device;
+	struct list_head	pd_list;
+	struct list_head	mr_list;
+	struct list_head	mw_list;
+	struct list_head	cq_list;
+	struct list_head	qp_list;
+	struct list_head	srq_list;
+	struct list_head	ah_list;
+	spinlock_t		lock;
+};
+
+struct ib_uobject {
+	u64			user_handle;	/* handle given to us by userspace */
+	struct ib_ucontext     *context;	/* associated user context */
+	struct list_head	list;		/* link to context's list */
+	u32			id;		/* index into kernel idr */
+};
+
+struct ib_umem {
+	unsigned long		user_base;
+	unsigned long		virt_base;
+	size_t			length;
+	int			offset;
+	int			page_size;
+	int			writable;
+	struct list_head	chunk_list;
+};
+
+struct ib_umem_chunk {
+	struct list_head	list;
+	int			nents;
+	int			nmap;
+	struct scatterlist	page_list[0];
+};
+
+struct ib_udata {
+	void __user *inbuf;
+	void __user *outbuf;
+	size_t	     inlen;
+	size_t	     outlen;
+};
+
+#define IB_UMEM_MAX_PAGE_CHUNK						\
+	((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) /	\
+	 ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] -	\
+	  (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))
+
+struct ib_umem_object {
+	struct ib_uobject	uobject;
+	struct ib_umem		umem;
+};
+
 struct ib_pd {
-	struct ib_device *device;
-	atomic_t          usecnt; /* count all resources */
+	struct ib_device       *device;
+	struct ib_uobject      *uobject;
+	atomic_t                usecnt; /* count all resources */
 };
 
 struct ib_ah {
 	struct ib_device	*device;
 	struct ib_pd		*pd;
+	struct ib_uobject	*uobject;
 };
 
 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
 
 struct ib_cq {
-	struct ib_device *device;
-	ib_comp_handler   comp_handler;
-	void             (*event_handler)(struct ib_event *, void *);
-	void             *cq_context;
-	int               cqe;
-	atomic_t          usecnt; /* count number of work queues */
+	struct ib_device       *device;
+	struct ib_uobject      *uobject;
+	ib_comp_handler         comp_handler;
+	void                  (*event_handler)(struct ib_event *, void *);
+	void                   *cq_context;
+	int                     cqe;
+	atomic_t                usecnt; /* count number of work queues */
 };
 
 struct ib_srq {
 	struct ib_device       *device;
+	struct ib_uobject      *uobject;
 	struct ib_pd	       *pd;
 	void		       *srq_context;
 	atomic_t		usecnt;
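The ib_umem/ib_umem_chunk pair describes a pinned userspace region as a list of scatterlist arrays, with IB_UMEM_MAX_PAGE_CHUNK sizing each chunk so the header plus page_list[] fits in a single page: for example, with 4 KB pages, a 16-byte list_head and a 32-byte struct scatterlist, (4096 - 24) / 32 = 127 pages per chunk. A hedged sketch of how a driver might walk such a region, e.g. from its reg_user_mr method — the function name, and the assumption that the chunks are already DMA-mapped so nmap is valid, are mine, not part of this patch:

static void mydrv_walk_umem(struct ib_umem *umem)
{
	struct ib_umem_chunk *chunk;
	int i;

	/* Each chunk holds up to IB_UMEM_MAX_PAGE_CHUNK mapped pages. */
	list_for_each_entry(chunk, &umem->chunk_list, list)
		for (i = 0; i < chunk->nmap; ++i)
			printk(KERN_DEBUG "dma addr 0x%llx, len %u\n",
			       (unsigned long long)
			       sg_dma_address(&chunk->page_list[i]),
			       sg_dma_len(&chunk->page_list[i]));
}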
@@ -652,23 +713,26 @@ struct ib_qp {
 	struct ib_cq	       *send_cq;
 	struct ib_cq	       *recv_cq;
 	struct ib_srq	       *srq;
+	struct ib_uobject      *uobject;
 	void                  (*event_handler)(struct ib_event *, void *);
 	void		       *qp_context;
 	u32			qp_num;
 	enum ib_qp_type		qp_type;
 };
 
 struct ib_mr {
-	struct ib_device  *device;
-	struct ib_pd      *pd;
-	u32                lkey;
-	u32                rkey;
-	atomic_t           usecnt; /* count number of MWs */
+	struct ib_device  *device;
+	struct ib_pd      *pd;
+	struct ib_uobject *uobject;
+	u32                lkey;
+	u32                rkey;
+	atomic_t           usecnt; /* count number of MWs */
 };
 
 struct ib_mw {
 	struct ib_device	*device;
 	struct ib_pd		*pd;
+	struct ib_uobject	*uobject;
 	u32			rkey;
 };
 
@@ -737,7 +801,14 @@ struct ib_device {
 	int                        (*modify_port)(struct ib_device *device,
 						  u8 port_num, int port_modify_mask,
 						  struct ib_port_modify *port_modify);
-	struct ib_pd *             (*alloc_pd)(struct ib_device *device);
+	struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
+						     struct ib_udata *udata);
+	int                        (*dealloc_ucontext)(struct ib_ucontext *context);
+	int                        (*mmap)(struct ib_ucontext *context,
+					   struct vm_area_struct *vma);
+	struct ib_pd *             (*alloc_pd)(struct ib_device *device,
+					       struct ib_ucontext *context,
+					       struct ib_udata *udata);
 	int                        (*dealloc_pd)(struct ib_pd *pd);
 	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
 						struct ib_ah_attr *ah_attr);
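alloc_pd (like create_qp and create_cq below) now receives the caller's ib_ucontext and an ib_udata, so a driver can tell kernel consumers (context == NULL) from userspace ones and hand driver-private details back to its userspace library. A sketch of the shape an implementation might take — the mydrv_* names and the response layout are invented for illustration; ib_copy_to_udata is the helper added at the end of this patch:

struct mydrv_pd {
	struct ib_pd	ibpd;
	u32		pdn;		/* hardware PD number */
};

struct mydrv_alloc_pd_resp {
	u32		pdn;		/* handed back to userspace */
};

static struct ib_pd *mydrv_alloc_pd(struct ib_device *ibdev,
				    struct ib_ucontext *context,
				    struct ib_udata *udata)
{
	struct mydrv_pd *pd;

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	/* ... allocate pd->pdn from the hardware ... */

	if (context) {
		/* userspace caller: return the PD number via udata */
		struct mydrv_alloc_pd_resp resp = { .pdn = pd->pdn };

		if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}
	}

	return &pd->ibpd;
}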
@@ -747,7 +818,8 @@ struct ib_device {
 						struct ib_ah_attr *ah_attr);
 	int                        (*destroy_ah)(struct ib_ah *ah);
 	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
-						struct ib_qp_init_attr *qp_init_attr);
+						struct ib_qp_init_attr *qp_init_attr,
+						struct ib_udata *udata);
 	int                        (*modify_qp)(struct ib_qp *qp,
 						struct ib_qp_attr *qp_attr,
 						int qp_attr_mask);
@@ -762,8 +834,9 @@ struct ib_device {
 	int                        (*post_recv)(struct ib_qp *qp,
 						struct ib_recv_wr *recv_wr,
 						struct ib_recv_wr **bad_recv_wr);
-	struct ib_cq *             (*create_cq)(struct ib_device *device,
-						int cqe);
+	struct ib_cq *             (*create_cq)(struct ib_device *device, int cqe,
+						struct ib_ucontext *context,
+						struct ib_udata *udata);
 	int                        (*destroy_cq)(struct ib_cq *cq);
 	int                        (*resize_cq)(struct ib_cq *cq, int *cqe);
 	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
@@ -780,6 +853,10 @@ struct ib_device {
 						       int num_phys_buf,
 						       int mr_access_flags,
 						       u64 *iova_start);
+	struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
+						  struct ib_umem *region,
+						  int mr_access_flags,
+						  struct ib_udata *udata);
 	int                        (*query_mr)(struct ib_mr *mr,
 					       struct ib_mr_attr *mr_attr);
 	int                        (*dereg_mr)(struct ib_mr *mr);
@@ -817,6 +894,7 @@ struct ib_device {
 						  struct ib_mad *in_mad,
 						  struct ib_mad *out_mad);
 
+	struct module               *owner;
 	struct class_device          class_dev;
 	struct kobject               ports_parent;
 	struct list_head             port_list;
@@ -852,6 +930,16 @@ void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
 void ib_set_client_data(struct ib_device *device, struct ib_client *client,
 			void *data);
 
+static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
+{
+	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
+}
+
+static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
+{
+	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
+}
+
 int ib_register_event_handler(struct ib_event_handler *event_handler);
 int ib_unregister_event_handler(struct ib_event_handler *event_handler);
 void ib_dispatch_event(struct ib_event *event);
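ib_copy_from_udata/ib_copy_to_udata collapse any user-copy fault into -EFAULT, so driver methods can consume the per-command buffers the uverbs layer sets up without touching copy_from_user/copy_to_user directly. A short sketch of the read side, with a hypothetical driver-private request layout (mydrv_create_cq_req is not part of this patch):

struct mydrv_create_cq_req {
	u64		buf_addr;	/* userspace address of the CQ ring */
	u32		cqe_size;
};

static int mydrv_parse_create_cq(struct ib_udata *udata,
				 struct mydrv_create_cq_req *req)
{
	/* Refuse commands from libraries older than this ABI. */
	if (udata->inlen < sizeof *req)
		return -EINVAL;

	return ib_copy_from_udata(req, udata, sizeof *req);
}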