@@ -99,6 +99,7 @@ static struct kmem_cache *odstate_slab;
99
99
static void free_session (struct nfsd4_session * );
100
100
101
101
static const struct nfsd4_callback_ops nfsd4_cb_recall_ops ;
102
+ static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops ;
102
103
103
104
static bool is_session_dead (struct nfsd4_session * ses )
104
105
{
@@ -210,6 +211,84 @@ static void nfsd4_put_session(struct nfsd4_session *ses)
210
211
spin_unlock (& nn -> client_lock );
211
212
}
212
213
214
+ static struct nfsd4_blocked_lock *
215
+ find_blocked_lock (struct nfs4_lockowner * lo , struct knfsd_fh * fh ,
216
+ struct nfsd_net * nn )
217
+ {
218
+ struct nfsd4_blocked_lock * cur , * found = NULL ;
219
+
220
+ spin_lock (& nn -> client_lock );
221
+ list_for_each_entry (cur , & lo -> lo_blocked , nbl_list ) {
222
+ if (fh_match (fh , & cur -> nbl_fh )) {
223
+ list_del_init (& cur -> nbl_list );
224
+ found = cur ;
225
+ break ;
226
+ }
227
+ }
228
+ spin_unlock (& nn -> client_lock );
229
+ if (found )
230
+ posix_unblock_lock (& found -> nbl_lock );
231
+ return found ;
232
+ }
233
+
234
+ static struct nfsd4_blocked_lock *
235
+ find_or_allocate_block (struct nfs4_lockowner * lo , struct knfsd_fh * fh ,
236
+ struct nfsd_net * nn )
237
+ {
238
+ struct nfsd4_blocked_lock * nbl ;
239
+
240
+ nbl = find_blocked_lock (lo , fh , nn );
241
+ if (!nbl ) {
242
+ nbl = kmalloc (sizeof (* nbl ), GFP_KERNEL );
243
+ if (nbl ) {
244
+ fh_copy_shallow (& nbl -> nbl_fh , fh );
245
+ locks_init_lock (& nbl -> nbl_lock );
246
+ nfsd4_init_cb (& nbl -> nbl_cb , lo -> lo_owner .so_client ,
247
+ & nfsd4_cb_notify_lock_ops ,
248
+ NFSPROC4_CLNT_CB_NOTIFY_LOCK );
249
+ }
250
+ }
251
+ return nbl ;
252
+ }
253
+
254
/*
 * Free a blocked-lock tracking structure. Any lock-manager private state
 * attached to the embedded file_lock is released first so no references
 * are leaked through nbl_lock.
 */
static void
free_blocked_lock(struct nfsd4_blocked_lock *nbl)
{
	locks_release_private(&nbl->nbl_lock);
	kfree(nbl);
}
260
+
261
+ static int
262
+ nfsd4_cb_notify_lock_done (struct nfsd4_callback * cb , struct rpc_task * task )
263
+ {
264
+ /*
265
+ * Since this is just an optimization, we don't try very hard if it
266
+ * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
267
+ * just quit trying on anything else.
268
+ */
269
+ switch (task -> tk_status ) {
270
+ case - NFS4ERR_DELAY :
271
+ rpc_delay (task , 1 * HZ );
272
+ return 0 ;
273
+ default :
274
+ return 1 ;
275
+ }
276
+ }
277
+
278
+ static void
279
+ nfsd4_cb_notify_lock_release (struct nfsd4_callback * cb )
280
+ {
281
+ struct nfsd4_blocked_lock * nbl = container_of (cb ,
282
+ struct nfsd4_blocked_lock , nbl_cb );
283
+
284
+ free_blocked_lock (nbl );
285
+ }
286
+
287
/* Callback ops for CB_NOTIFY_LOCK: retry policy and final cleanup. */
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
	.done		= nfsd4_cb_notify_lock_done,
	.release	= nfsd4_cb_notify_lock_release,
};
291
+
213
292
static inline struct nfs4_stateowner *
214
293
nfs4_get_stateowner (struct nfs4_stateowner * sop )
215
294
{
@@ -5309,7 +5388,29 @@ nfsd4_fl_put_owner(fl_owner_t owner)
5309
5388
nfs4_put_stateowner (& lo -> lo_owner );
5310
5389
}
5311
5390
5391
+ static void
5392
+ nfsd4_lm_notify (struct file_lock * fl )
5393
+ {
5394
+ struct nfs4_lockowner * lo = (struct nfs4_lockowner * )fl -> fl_owner ;
5395
+ struct net * net = lo -> lo_owner .so_client -> net ;
5396
+ struct nfsd_net * nn = net_generic (net , nfsd_net_id );
5397
+ struct nfsd4_blocked_lock * nbl = container_of (fl ,
5398
+ struct nfsd4_blocked_lock , nbl_lock );
5399
+ bool queue = false;
5400
+
5401
+ spin_lock (& nn -> client_lock );
5402
+ if (!list_empty (& nbl -> nbl_list )) {
5403
+ list_del_init (& nbl -> nbl_list );
5404
+ queue = true;
5405
+ }
5406
+ spin_unlock (& nn -> client_lock );
5407
+
5408
+ if (queue )
5409
+ nfsd4_run_cb (& nbl -> nbl_cb );
5410
+ }
5411
+
5312
5412
/* Lock-manager callbacks nfsd attaches to every POSIX lock it sets. */
static const struct lock_manager_operations nfsd_posix_mng_ops = {
	.lm_notify = nfsd4_lm_notify,
	.lm_get_owner = nfsd4_fl_get_owner,
	.lm_put_owner = nfsd4_fl_put_owner,
};
@@ -5407,6 +5508,7 @@ alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
5407
5508
lo = alloc_stateowner (lockowner_slab , & lock -> lk_new_owner , clp );
5408
5509
if (!lo )
5409
5510
return NULL ;
5511
+ INIT_LIST_HEAD (& lo -> lo_blocked );
5410
5512
INIT_LIST_HEAD (& lo -> lo_owner .so_stateids );
5411
5513
lo -> lo_owner .so_is_open_owner = 0 ;
5412
5514
lo -> lo_owner .so_seqid = lock -> lk_new_lock_seqid ;
@@ -5588,12 +5690,15 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5588
5690
struct nfs4_ol_stateid * open_stp = NULL ;
5589
5691
struct nfs4_file * fp ;
5590
5692
struct file * filp = NULL ;
5693
+ struct nfsd4_blocked_lock * nbl = NULL ;
5591
5694
struct file_lock * file_lock = NULL ;
5592
5695
struct file_lock * conflock = NULL ;
5593
5696
__be32 status = 0 ;
5594
5697
int lkflg ;
5595
5698
int err ;
5596
5699
bool new = false;
5700
+ unsigned char fl_type ;
5701
+ unsigned int fl_flags = FL_POSIX ;
5597
5702
struct net * net = SVC_NET (rqstp );
5598
5703
struct nfsd_net * nn = net_generic (net , nfsd_net_id );
5599
5704
@@ -5658,46 +5763,55 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5658
5763
if (!locks_in_grace (net ) && lock -> lk_reclaim )
5659
5764
goto out ;
5660
5765
5661
- file_lock = locks_alloc_lock ();
5662
- if (!file_lock ) {
5663
- dprintk ("NFSD: %s: unable to allocate lock!\n" , __func__ );
5664
- status = nfserr_jukebox ;
5665
- goto out ;
5666
- }
5667
-
5668
5766
fp = lock_stp -> st_stid .sc_file ;
5669
5767
switch (lock -> lk_type ) {
5670
- case NFS4_READ_LT :
5671
5768
case NFS4_READW_LT :
5769
+ if (nfsd4_has_session (cstate ))
5770
+ fl_flags |= FL_SLEEP ;
5771
+ /* Fallthrough */
5772
+ case NFS4_READ_LT :
5672
5773
spin_lock (& fp -> fi_lock );
5673
5774
filp = find_readable_file_locked (fp );
5674
5775
if (filp )
5675
5776
get_lock_access (lock_stp , NFS4_SHARE_ACCESS_READ );
5676
5777
spin_unlock (& fp -> fi_lock );
5677
- file_lock -> fl_type = F_RDLCK ;
5778
+ fl_type = F_RDLCK ;
5678
5779
break ;
5679
- case NFS4_WRITE_LT :
5680
5780
case NFS4_WRITEW_LT :
5781
+ if (nfsd4_has_session (cstate ))
5782
+ fl_flags |= FL_SLEEP ;
5783
+ /* Fallthrough */
5784
+ case NFS4_WRITE_LT :
5681
5785
spin_lock (& fp -> fi_lock );
5682
5786
filp = find_writeable_file_locked (fp );
5683
5787
if (filp )
5684
5788
get_lock_access (lock_stp , NFS4_SHARE_ACCESS_WRITE );
5685
5789
spin_unlock (& fp -> fi_lock );
5686
- file_lock -> fl_type = F_WRLCK ;
5790
+ fl_type = F_WRLCK ;
5687
5791
break ;
5688
5792
default :
5689
5793
status = nfserr_inval ;
5690
5794
goto out ;
5691
5795
}
5796
+
5692
5797
if (!filp ) {
5693
5798
status = nfserr_openmode ;
5694
5799
goto out ;
5695
5800
}
5696
5801
5802
+ nbl = find_or_allocate_block (lock_sop , & fp -> fi_fhandle , nn );
5803
+ if (!nbl ) {
5804
+ dprintk ("NFSD: %s: unable to allocate block!\n" , __func__ );
5805
+ status = nfserr_jukebox ;
5806
+ goto out ;
5807
+ }
5808
+
5809
+ file_lock = & nbl -> nbl_lock ;
5810
+ file_lock -> fl_type = fl_type ;
5697
5811
file_lock -> fl_owner = (fl_owner_t )lockowner (nfs4_get_stateowner (& lock_sop -> lo_owner ));
5698
5812
file_lock -> fl_pid = current -> tgid ;
5699
5813
file_lock -> fl_file = filp ;
5700
- file_lock -> fl_flags = FL_POSIX ;
5814
+ file_lock -> fl_flags = fl_flags ;
5701
5815
file_lock -> fl_lmops = & nfsd_posix_mng_ops ;
5702
5816
file_lock -> fl_start = lock -> lk_offset ;
5703
5817
file_lock -> fl_end = last_byte_offset (lock -> lk_offset , lock -> lk_length );
@@ -5710,18 +5824,27 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5710
5824
goto out ;
5711
5825
}
5712
5826
5827
+ if (fl_flags & FL_SLEEP ) {
5828
+ spin_lock (& nn -> client_lock );
5829
+ list_add_tail (& nbl -> nbl_list , & lock_sop -> lo_blocked );
5830
+ spin_unlock (& nn -> client_lock );
5831
+ }
5832
+
5713
5833
err = vfs_lock_file (filp , F_SETLK , file_lock , conflock );
5714
- switch (- err ) {
5834
+ switch (err ) {
5715
5835
case 0 : /* success! */
5716
5836
nfs4_inc_and_copy_stateid (& lock -> lk_resp_stateid , & lock_stp -> st_stid );
5717
5837
status = 0 ;
5718
5838
break ;
5719
- case (EAGAIN ): /* conflock holds conflicting lock */
5839
+ case FILE_LOCK_DEFERRED :
5840
+ nbl = NULL ;
5841
+ /* Fallthrough */
5842
+ case - EAGAIN : /* conflock holds conflicting lock */
5720
5843
status = nfserr_denied ;
5721
5844
dprintk ("NFSD: nfsd4_lock: conflicting lock found!\n" );
5722
5845
nfs4_set_lock_denied (conflock , & lock -> lk_denied );
5723
5846
break ;
5724
- case ( EDEADLK ) :
5847
+ case - EDEADLK :
5725
5848
status = nfserr_deadlock ;
5726
5849
break ;
5727
5850
default :
@@ -5730,6 +5853,15 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5730
5853
break ;
5731
5854
}
5732
5855
out :
5856
+ if (nbl ) {
5857
+ /* dequeue it if we queued it before */
5858
+ if (fl_flags & FL_SLEEP ) {
5859
+ spin_lock (& nn -> client_lock );
5860
+ list_del_init (& nbl -> nbl_list );
5861
+ spin_unlock (& nn -> client_lock );
5862
+ }
5863
+ free_blocked_lock (nbl );
5864
+ }
5733
5865
if (filp )
5734
5866
fput (filp );
5735
5867
if (lock_stp ) {
@@ -5753,8 +5885,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5753
5885
if (open_stp )
5754
5886
nfs4_put_stid (& open_stp -> st_stid );
5755
5887
nfsd4_bump_seqid (cstate , status );
5756
- if (file_lock )
5757
- locks_free_lock (file_lock );
5758
5888
if (conflock )
5759
5889
locks_free_lock (conflock );
5760
5890
return status ;
0 commit comments