@@ -61,7 +61,7 @@
  * 4. Global variable peer_total is modified under the pool lock.
  * 5. struct inet_peer fields modification:
  *	avl_left, avl_right, avl_parent, avl_height: pool lock
- *	unused_next, unused_prevp: unused node list lock
+ *	unused: unused node list lock
  *	refcnt: atomically against modifications on other CPU;
  *	   usually under some other lock to prevent node disappearing
  *	dtime: unused node list lock
@@ -94,8 +94,7 @@ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min
 int inet_peer_gc_mintime __read_mostly = 10 * HZ;
 int inet_peer_gc_maxtime __read_mostly = 120 * HZ;
 
-static struct inet_peer *inet_peer_unused_head;
-static struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
+static LIST_HEAD(unused_peers);
 static DEFINE_SPINLOCK(inet_peer_unused_lock);
 
 static void peer_check_expire(unsigned long dummy);
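The two hand-maintained globals (a head pointer plus a pointer to the tail's next field, which is what made O(1) appends possible) collapse into a single statically initialized sentinel. LIST_HEAD() defines a node whose next and prev point back at itself; the companion change to struct inet_peer in include/net/inetpeer.h is not part of this hunk, but presumably swaps unused_next/unused_prevp for one struct list_head unused member. Below is a minimal userspace stand-in for the macro, purely for illustration; the real definition lives in <linux/list.h>.

#include <stdio.h>

/* Userspace mock-up of the kernel's LIST_HEAD() macro: a statically
 * defined sentinel whose next and prev point back at itself, which is
 * also what "empty" means for this kind of circular list. */

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name)	{ &(name), &(name) }
#define LIST_HEAD(name)		struct list_head name = LIST_HEAD_INIT(name)

static LIST_HEAD(unused_peers);

int main(void)
{
	printf("initially empty: %d\n", unused_peers.next == &unused_peers);
	return 0;
}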
@@ -138,15 +137,7 @@ void __init inet_initpeers(void)
 static void unlink_from_unused(struct inet_peer *p)
 {
 	spin_lock_bh(&inet_peer_unused_lock);
-	if (p->unused_prevp != NULL) {
-		/* On unused list. */
-		*p->unused_prevp = p->unused_next;
-		if (p->unused_next != NULL)
-			p->unused_next->unused_prevp = p->unused_prevp;
-		else
-			inet_peer_unused_tailp = p->unused_prevp;
-		p->unused_prevp = NULL;	/* mark it as removed */
-	}
+	list_del_init(&p->unused);
 	spin_unlock_bh(&inet_peer_unused_lock);
 }
 
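The whole hand-rolled unlink, including the p->unused_prevp != NULL membership test, becomes one list_del_init() call. That works because a list_head that is not on any list is kept pointing at itself, so unlinking it again is a harmless no-op and never dereferences NULL. The sketch below re-implements just enough of the circular-list idiom in userspace to demonstrate that property; it is an illustration of the idiom, not kernel code, and the names are made up for the example.

#include <assert.h>
#include <stdio.h>

/* Userspace sketch of the idiom behind list_del_init(): an entry that is
 * not on any list points at itself, so "deleting" it is harmless, and
 * re-initialization restores that invariant.  This is why the old
 * "p->unused_prevp != NULL" membership check can simply go away. */

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	INIT_LIST_HEAD(entry);		/* leave it self-linked again */
}

static int list_empty(const struct list_head *h) { return h->next == h; }

int main(void)
{
	struct list_head unused_peers, node;

	INIT_LIST_HEAD(&unused_peers);
	INIT_LIST_HEAD(&node);

	list_del_init(&node);			/* off-list: safe no-op */
	assert(list_empty(&unused_peers));

	list_add_tail(&node, &unused_peers);	/* put it on the list */
	list_del_init(&node);			/* on-list: really unlinks */
	assert(list_empty(&unused_peers) && list_empty(&node));

	puts("list_del_init() works whether or not the entry is linked");
	return 0;
}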
@@ -337,24 +328,24 @@ static void unlink_from_pool(struct inet_peer *p)
 /* May be called with local BH enabled. */
 static int cleanup_once(unsigned long ttl)
 {
-	struct inet_peer *p;
+	struct inet_peer *p = NULL;
 
 	/* Remove the first entry from the list of unused nodes. */
 	spin_lock_bh(&inet_peer_unused_lock);
-	p = inet_peer_unused_head;
-	if (p != NULL) {
-		__u32 delta = (__u32)jiffies - p->dtime;
+	if (!list_empty(&unused_peers)) {
+		__u32 delta;
+
+		p = list_first_entry(&unused_peers, struct inet_peer, unused);
+		delta = (__u32)jiffies - p->dtime;
+
 		if (delta < ttl) {
 			/* Do not prune fresh entries. */
 			spin_unlock_bh(&inet_peer_unused_lock);
 			return -1;
 		}
-		inet_peer_unused_head = p->unused_next;
-		if (p->unused_next != NULL)
-			p->unused_next->unused_prevp = p->unused_prevp;
-		else
-			inet_peer_unused_tailp = p->unused_prevp;
-		p->unused_prevp = NULL;	/* mark as not on the list */
+
+		list_del_init(&p->unused);
+
 		/* Grab an extra reference to prevent node disappearing
 		 * before unlink_from_pool() call. */
 		atomic_inc(&p->refcnt);
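list_first_entry() is only meaningful on a non-empty list, which is why the !list_empty() test now guards it; it simply turns the head's next pointer back into the containing struct inet_peer with the usual offsetof arithmetic. Because inet_putpeer() appends newly unused peers at the tail (see the last hunk), the first entry is always the one unused the longest, so checking its dtime against the ttl keeps the old oldest-first semantics. A small userspace mock-up of those mechanics follows; struct peer, the dtime values, and the helpers are stand-ins invented for the example, not kernel code.

#include <stddef.h>
#include <stdio.h>

/* Mock-up of the pieces cleanup_once() relies on: list_first_entry() is
 * container_of() applied to head->next, and tail appends keep the oldest
 * entry at the head of the list. */

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD(name)	struct list_head name = { &(name), &(name) }

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_first_entry(head, type, member) \
	container_of((head)->next, type, member)

static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

struct peer {
	unsigned int dtime;		/* when the peer became unused */
	struct list_head unused;
};

static LIST_HEAD(unused_peers);

int main(void)
{
	struct peer old = { .dtime = 100 }, young = { .dtime = 200 };

	/* two inet_putpeer()-style appends: the oldest entry stays first */
	list_add_tail(&old.unused, &unused_peers);
	list_add_tail(&young.unused, &unused_peers);

	if (!list_empty(&unused_peers)) {
		struct peer *p = list_first_entry(&unused_peers, struct peer, unused);
		printf("oldest dtime = %u\n", p->dtime);	/* prints 100 */
	}
	return 0;
}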
@@ -412,7 +403,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
 
 	/* Link the node. */
 	link_to_pool(n);
-	n->unused_prevp = NULL;	/* not on the list */
+	INIT_LIST_HEAD(&n->unused);
 	peer_total++;
 	write_unlock_bh(&peer_pool_lock);
 
@@ -467,10 +458,7 @@ void inet_putpeer(struct inet_peer *p)
 {
 	spin_lock_bh(&inet_peer_unused_lock);
 	if (atomic_dec_and_test(&p->refcnt)) {
-		p->unused_prevp = inet_peer_unused_tailp;
-		p->unused_next = NULL;
-		*inet_peer_unused_tailp = p;
-		inet_peer_unused_tailp = &p->unused_next;
+		list_add_tail(&p->unused, &unused_peers);
 		p->dtime = (__u32)jiffies;
 	}
 	spin_unlock_bh(&inet_peer_unused_lock);