@@ -441,10 +441,9 @@ static const struct address_space_operations aio_ctx_aops = {
 #endif
 };
 
-static int aio_setup_ring(struct kioctx *ctx)
+static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
 {
 	struct aio_ring *ring;
-	unsigned nr_events = ctx->max_reqs;
 	struct mm_struct *mm = current->mm;
 	unsigned long size, unused;
 	int nr_pages;
@@ -706,6 +705,12 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 	struct kioctx *ctx;
 	int err = -ENOMEM;
 
+	/*
+	 * Store the original nr_events -- what userspace passed to io_setup(),
+	 * for counting against the global limit -- before it changes.
+	 */
+	unsigned int max_reqs = nr_events;
+
 	/*
 	 * We keep track of the number of available ringbuffer slots, to prevent
 	 * overflow (reqs_available), and we also use percpu counters for this.
@@ -724,14 +729,14 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 		return ERR_PTR(-EINVAL);
 	}
 
-	if (!nr_events || (unsigned long)nr_events > (aio_max_nr * 2UL))
+	if (!nr_events || (unsigned long)max_reqs > aio_max_nr)
 		return ERR_PTR(-EAGAIN);
 
 	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
 	if (!ctx)
 		return ERR_PTR(-ENOMEM);
 
-	ctx->max_reqs = nr_events;
+	ctx->max_reqs = max_reqs;
 
 	spin_lock_init(&ctx->ctx_lock);
 	spin_lock_init(&ctx->completion_lock);
@@ -753,7 +758,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 	if (!ctx->cpu)
 		goto err;
 
-	err = aio_setup_ring(ctx);
+	err = aio_setup_ring(ctx, nr_events);
 	if (err < 0)
 		goto err;
 
@@ -764,8 +769,8 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 
 	/* limit the number of system wide aios */
 	spin_lock(&aio_nr_lock);
-	if (aio_nr + nr_events > (aio_max_nr * 2UL) ||
-	    aio_nr + nr_events < aio_nr) {
+	if (aio_nr + ctx->max_reqs > aio_max_nr ||
+	    aio_nr + ctx->max_reqs < aio_nr) {
		spin_unlock(&aio_nr_lock);
		err = -EAGAIN;
		goto err_ctx;
0 commit comments