@@ -42,15 +42,15 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
 u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
 {
 	__be16 data;
-	// optimize later...
+	/* TODO: optimize later... */
 	hfs_bnode_read(node, &data, off, 2);
 	return be16_to_cpu(data);
 }
 
 u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
 {
 	u8 data;
-	// optimize later...
+	/* TODO: optimize later... */
 	hfs_bnode_read(node, &data, off, 1);
 	return data;
 }
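Note: HFS keeps B-tree node contents big-endian on disk, which is why these helpers copy raw bytes out of the node and then byte-swap (be16_to_cpu() on the read path here, cpu_to_be16() on the write path below). A minimal userspace sketch of the same read pattern, with illustrative names that are not the kernel's:

	#include <stdint.h>
	#include <string.h>
	#include <arpa/inet.h>	/* ntohs(): big-endian to host, like be16_to_cpu() */

	/* Hypothetical stand-in for hfs_bnode_read(): copy bytes out of a node buffer. */
	static void node_read(const uint8_t *node, void *buf, int off, int len)
	{
		memcpy(buf, node + off, len);
	}

	/* Analogue of hfs_bnode_read_u16(): fetch a big-endian u16 and convert. */
	static uint16_t node_read_u16(const uint8_t *node, int off)
	{
		uint16_t be;

		node_read(node, &be, off, 2);
		return ntohs(be);
	}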
@@ -96,7 +96,7 @@ void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
 void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
 {
 	__be16 v = cpu_to_be16(data);
-	// optimize later...
+	/* TODO: optimize later... */
 	hfs_bnode_write(node, &v, off, 2);
 }
 
@@ -212,7 +212,8 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
 				dst_page--;
 			}
 			src -= len;
-			memmove(kmap(*dst_page) + src, kmap(*src_page) + src, len);
+			memmove(kmap(*dst_page) + src,
+				kmap(*src_page) + src, len);
 			kunmap(*src_page);
 			set_page_dirty(*dst_page);
 			kunmap(*dst_page);
@@ -250,14 +251,16 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
 
 		if (src == dst) {
 			l = min(len, (int)PAGE_CACHE_SIZE - src);
-			memmove(kmap(*dst_page) + src, kmap(*src_page) + src, l);
+			memmove(kmap(*dst_page) + src,
+				kmap(*src_page) + src, l);
 			kunmap(*src_page);
 			set_page_dirty(*dst_page);
 			kunmap(*dst_page);
 
 			while ((len -= l) != 0) {
 				l = min(len, (int)PAGE_CACHE_SIZE);
-				memmove(kmap(*++dst_page), kmap(*++src_page), l);
+				memmove(kmap(*++dst_page),
+					kmap(*++src_page), l);
 				kunmap(*src_page);
 				set_page_dirty(*dst_page);
 				kunmap(*dst_page);
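Note: a bnode can span several page-cache pages, so hfs_bnode_move() copies chunk by chunk: kmap() the source and destination pages, memmove() at most a page's worth, mark the destination dirty, kunmap(). The chunking logic of the (src == dst) branch, as a standalone sketch with plain buffers standing in for struct page (illustrative only):

	#include <string.h>

	#define PAGE_SZ 4096	/* stand-in for PAGE_CACHE_SIZE */

	static int min_int(int a, int b) { return a < b ? a : b; }

	/* Copy len bytes forward across an array of page-sized buffers
	 * when source and destination share the same in-page offset. */
	static void move_aligned(char **dst_page, char **src_page, int src, int len)
	{
		int l = min_int(len, PAGE_SZ - src);

		memmove(*dst_page + src, *src_page + src, l);
		while ((len -= l) != 0) {
			l = min_int(len, PAGE_SZ);
			memmove(*++dst_page, *++src_page, l);
		}
	}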
@@ -268,7 +271,8 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
 			do {
 				src_ptr = kmap(*src_page) + src;
 				dst_ptr = kmap(*dst_page) + dst;
-				if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) {
+				if (PAGE_CACHE_SIZE - src <
+				    PAGE_CACHE_SIZE - dst) {
 					l = PAGE_CACHE_SIZE - src;
 					src = 0;
 					dst += l;
@@ -340,7 +344,8 @@ void hfs_bnode_unlink(struct hfs_bnode *node)
 			return;
 		tmp->next = node->next;
 		cnid = cpu_to_be32(tmp->next);
-		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, next), 4);
+		hfs_bnode_write(tmp, &cnid,
+			offsetof(struct hfs_bnode_desc, next), 4);
 		hfs_bnode_put(tmp);
 	} else if (node->type == HFS_NODE_LEAF)
 		tree->leaf_head = node->next;
@@ -351,15 +356,15 @@ void hfs_bnode_unlink(struct hfs_bnode *node)
 			return;
 		tmp->prev = node->prev;
 		cnid = cpu_to_be32(tmp->prev);
-		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, prev), 4);
+		hfs_bnode_write(tmp, &cnid,
+			offsetof(struct hfs_bnode_desc, prev), 4);
 		hfs_bnode_put(tmp);
 	} else if (node->type == HFS_NODE_LEAF)
 		tree->leaf_tail = node->prev;
 
-	// move down?
-	if (!node->prev && !node->next) {
-		printk(KERN_DEBUG "hfs_btree_del_level\n");
-	}
+	/* move down? */
+	if (!node->prev && !node->next)
+		dprint(DBG_BNODE_MOD, "hfs_btree_del_level\n");
 	if (!node->parent) {
 		tree->root = 0;
 		tree->depth = 0;
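Note: the two wrapped hfs_bnode_write() calls above splice a node out of its doubly-linked sibling chain; each patches a single 4-byte CNID link inside the on-disk node descriptor, with offsetof() locating the field. Roughly like this (field layout abbreviated from struct hfs_bnode_desc in the HFS headers, so treat it as a sketch):

	#include <stdio.h>
	#include <stddef.h>
	#include <stdint.h>

	struct bnode_desc {
		uint32_t next;	/* CNID of next sibling (big-endian on disk) */
		uint32_t prev;	/* CNID of previous sibling */
		uint8_t type;
		uint8_t height;
		uint16_t num_recs;
	};

	int main(void)
	{
		/* A 4-byte write at offsetof(...) updates just one link in place. */
		printf("next at %zu, prev at %zu\n",
		       offsetof(struct bnode_desc, next),
		       offsetof(struct bnode_desc, prev));
		return 0;
	}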
@@ -379,16 +384,16 @@ struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
 	struct hfs_bnode *node;
 
 	if (cnid >= tree->node_count) {
-		printk(KERN_ERR "hfs: request for non-existent node %d in B*Tree\n", cnid);
+		printk(KERN_ERR "hfs: request for non-existent node "
+				"%d in B*Tree\n",
+			cnid);
 		return NULL;
 	}
 
 	for (node = tree->node_hash[hfs_bnode_hash(cnid)];
-	     node; node = node->next_hash) {
-		if (node->this == cnid) {
+	     node; node = node->next_hash)
+		if (node->this == cnid)
 			return node;
-		}
-	}
 	return NULL;
 }
 
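Note: the lookup itself is untouched; only the braces around single statements go away. Cached nodes hang off a hash table keyed by CNID and chained through next_hash. The same shape in a self-contained sketch (table size and hash function are made up here; the kernel uses its own hfs_bnode_hash()):

	#include <stdint.h>
	#include <stddef.h>

	#define NODE_HASH_SIZE 256	/* illustrative only */

	struct cached_node {
		uint32_t cnid;
		struct cached_node *next_hash;	/* collision chain */
	};

	static struct cached_node *node_hash[NODE_HASH_SIZE];

	/* Same pattern as hfs_bnode_findhash(): index by hash, walk the chain. */
	static struct cached_node *find_node(uint32_t cnid)
	{
		struct cached_node *node;

		for (node = node_hash[cnid % NODE_HASH_SIZE];
		     node; node = node->next_hash)
			if (node->cnid == cnid)
				return node;
		return NULL;
	}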
@@ -402,7 +407,9 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
 	loff_t off;
 
 	if (cnid >= tree->node_count) {
-		printk(KERN_ERR "hfs: request for non-existent node %d in B*Tree\n", cnid);
+		printk(KERN_ERR "hfs: request for non-existent node "
+				"%d in B*Tree\n",
+			cnid);
 		return NULL;
 	}
 
@@ -429,7 +436,8 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
 	} else {
 		spin_unlock(&tree->hash_lock);
 		kfree(node);
-		wait_event(node2->lock_wq, !test_bit(HFS_BNODE_NEW, &node2->flags));
+		wait_event(node2->lock_wq,
+			!test_bit(HFS_BNODE_NEW, &node2->flags));
 		return node2;
 	}
 	spin_unlock(&tree->hash_lock);
@@ -483,7 +491,8 @@ struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
 	if (node) {
 		hfs_bnode_get(node);
 		spin_unlock(&tree->hash_lock);
-		wait_event(node->lock_wq, !test_bit(HFS_BNODE_NEW, &node->flags));
+		wait_event(node->lock_wq,
+			!test_bit(HFS_BNODE_NEW, &node->flags));
 		if (test_bit(HFS_BNODE_ERROR, &node->flags))
 			goto node_error;
 		return node;
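Note: both wait_event() call sites guard the same invariant: a node found in the hash may still carry HFS_BNODE_NEW, meaning another thread is reading it in from disk, so the caller sleeps until that bit clears. A rough userspace analogue with a condition variable (the kernel primitive needs no explicit lock around the flag test; this sketch does):

	#include <pthread.h>
	#include <stdbool.h>

	struct waitable_node {
		bool is_new;		/* plays the role of HFS_BNODE_NEW */
		pthread_mutex_t lock;
		pthread_cond_t wq;	/* plays the role of node->lock_wq */
	};

	static void wait_until_ready(struct waitable_node *node)
	{
		pthread_mutex_lock(&node->lock);
		while (node->is_new)	/* wait_event() also re-checks its condition */
			pthread_cond_wait(&node->wq, &node->lock);
		pthread_mutex_unlock(&node->lock);
	}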
@@ -497,7 +506,8 @@ struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
 	if (!test_bit(HFS_BNODE_NEW, &node->flags))
 		return node;
 
-	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + node->page_offset);
+	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) +
+			node->page_offset);
 	node->prev = be32_to_cpu(desc->prev);
 	node->next = be32_to_cpu(desc->next);
 	node->num_recs = be16_to_cpu(desc->num_recs);
@@ -556,11 +566,13 @@ struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
 
 void hfs_bnode_free(struct hfs_bnode *node)
 {
-	//int i;
+#if 0
+	int i;
 
-	//for (i = 0; i < node->tree->pages_per_bnode; i++)
-	//	if (node->page[i])
-	//		page_cache_release(node->page[i]);
+	for (i = 0; i < node->tree->pages_per_bnode; i++)
+		if (node->page[i])
+			page_cache_release(node->page[i]);
+#endif
 	kfree(node);
 }
 
@@ -607,7 +619,8 @@ void hfs_bnode_get(struct hfs_bnode *node)
 	if (node) {
 		atomic_inc(&node->refcnt);
 		dprint(DBG_BNODE_REFS, "get_node(%d:%d): %d\n",
-			node->tree->cnid, node->this, atomic_read(&node->refcnt));
+			node->tree->cnid, node->this,
+			atomic_read(&node->refcnt));
 	}
 }
 
@@ -619,7 +632,8 @@ void hfs_bnode_put(struct hfs_bnode *node)
 		int i;
 
 		dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n",
-			node->tree->cnid, node->this, atomic_read(&node->refcnt));
+			node->tree->cnid, node->this,
+			atomic_read(&node->refcnt));
 		BUG_ON(!atomic_read(&node->refcnt));
 		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
 			return;
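Note: the last two hunks only re-wrap the debug prints; underneath is plain reference counting, with hfs_bnode_get() incrementing and hfs_bnode_put() decrementing and tearing the node down at zero. The core pattern, sketched with C11 atomics standing in for the kernel's atomic_t:

	#include <stdatomic.h>
	#include <stdlib.h>

	struct refnode {
		atomic_int refcnt;	/* like node->refcnt */
	};

	static void node_get(struct refnode *node)
	{
		atomic_fetch_add(&node->refcnt, 1);
	}

	static void node_put(struct refnode *node)
	{
		/* Last reference gone: free. hfs_bnode_put() instead uses
		 * atomic_dec_and_lock(), so the final drop happens with
		 * tree->hash_lock held and unhashing cannot race it. */
		if (atomic_fetch_sub(&node->refcnt, 1) == 1)
			free(node);
	}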