lock.c

#include <linux/reiserfs_fs.h>
#include <linux/mutex.h>

/*
 * The previous reiserfs locking scheme was heavily based on
 * the tricky properties of the Bkl:
 *
 * - it was acquired recursively by the same task
 * - performance relied on the release-while-schedule() property
 *
 * Now that we replace it with a mutex, we still want to keep the same
 * recursive property to avoid big changes in the code structure.
 * We use our own lock_owner here because the owner field on a mutex
 * is only available with SMP or mutex debugging; also, we only need this
 * field for this mutex, so there is no need for a system-wide mutex facility.
 *
 * Also, this lock is often released before a call that could block because
 * reiserfs performance was partially based on the release-while-schedule()
 * property of the Bkl.
 */

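/*
 * Hypothetical illustration, not part of the reiserfs code: because the
 * lock records its owner and a recursion depth, a task that already holds
 * the write lock may take it again without deadlocking, much as it could
 * with the Bkl.  The function below is an assumed example only and is kept
 * out of the build.
 */
#if 0
static void example_nested_write_lock(struct super_block *s)
{
        reiserfs_write_lock(s);         /* depth -1 -> 0, mutex acquired */
        reiserfs_write_lock(s);         /* same owner: depth 0 -> 1, no deadlock */

        /* ... work that may itself take the write lock ... */

        reiserfs_write_unlock(s);       /* depth 1 -> 0, mutex still held */
        reiserfs_write_unlock(s);       /* depth 0 -> -1, mutex released */
}
#endif
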
void reiserfs_write_lock(struct super_block *s)
{
        struct reiserfs_sb_info *sb_i = REISERFS_SB(s);

        if (sb_i->lock_owner != current) {
                mutex_lock(&sb_i->lock);
                sb_i->lock_owner = current;
        }

        /* No need to protect it, only the current task touches it */
        sb_i->lock_depth++;
}

void reiserfs_write_unlock(struct super_block *s)
{
        struct reiserfs_sb_info *sb_i = REISERFS_SB(s);

        /*
         * Are we unlocking without even holding the lock?
         * Such a situation must raise a BUG() if we don't want
         * to corrupt the data.
         */
        BUG_ON(sb_i->lock_owner != current);
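
        /*
         * lock_depth is -1 while the write lock is free: the first
         * reiserfs_write_lock() brings it to 0, so dropping back to -1
         * here means this is the outermost unlock and the mutex can
         * really be released.
         */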
        if (--sb_i->lock_depth == -1) {
                sb_i->lock_owner = NULL;
                mutex_unlock(&sb_i->lock);
        }
}

/*
 * If we already own the lock, just exit and don't increase the depth.
 * Useful when we don't want to lock more than once.
 *
 * We always return the lock_depth we had before calling this function.
 * (See the usage sketch after reiserfs_write_unlock_once() below.)
 */
int reiserfs_write_lock_once(struct super_block *s)
{
        struct reiserfs_sb_info *sb_i = REISERFS_SB(s);

        if (sb_i->lock_owner != current) {
                mutex_lock(&sb_i->lock);
                sb_i->lock_owner = current;
                return sb_i->lock_depth++;
        }

        return sb_i->lock_depth;
}

void reiserfs_write_unlock_once(struct super_block *s, int lock_depth)
{
        if (lock_depth == -1)
                reiserfs_write_unlock(s);
}

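/*
 * Hypothetical illustration, not part of the reiserfs code: a typical
 * caller saves the depth returned by reiserfs_write_lock_once() and hands
 * it back to reiserfs_write_unlock_once(), so the lock is only dropped by
 * the call frame that actually acquired it.  Kept out of the build.
 */
#if 0
static void example_write_lock_once(struct super_block *s)
{
        int depth;

        depth = reiserfs_write_lock_once(s);

        /* ... code that must hold the write lock, but only once ... */

        /* Unlocks only if depth was -1, i.e. we took the lock above */
        reiserfs_write_unlock_once(s, depth);
}
#endif
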
/*
 * Utility function to force a BUG if it is called without the superblock
 * write lock held.  caller is the string printed just before calling BUG().
 */
void reiserfs_check_lock_depth(struct super_block *sb, char *caller)
{
        struct reiserfs_sb_info *sb_i = REISERFS_SB(sb);

        if (sb_i->lock_depth < 0)
                reiserfs_panic(sb, "%s called without kernel lock held",
                               caller);
}

#ifdef CONFIG_REISERFS_CHECK
void reiserfs_lock_check_recursive(struct super_block *sb)
{
        struct reiserfs_sb_info *sb_i = REISERFS_SB(sb);

        WARN_ONCE((sb_i->lock_depth > 0), "Unwanted recursive reiserfs lock!\n");
}
#endif