@@ -10,10 +10,12 @@
 
 use libc;
 use cell::UnsafeCell;
+use sync::atomic::{AtomicUsize, Ordering};
 
 pub struct RWLock {
     inner: UnsafeCell<libc::pthread_rwlock_t>,
     write_locked: UnsafeCell<bool>,
+    num_readers: AtomicUsize,
 }
 
 unsafe impl Send for RWLock {}
@@ -24,6 +26,7 @@ impl RWLock {
         RWLock {
             inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER),
             write_locked: UnsafeCell::new(false),
+            num_readers: AtomicUsize::new(0),
         }
     }
     #[inline]
@@ -54,23 +57,31 @@ impl RWLock {
             panic!("rwlock read lock would result in deadlock");
         } else {
             debug_assert_eq!(r, 0);
+            self.num_readers.fetch_add(1, Ordering::Relaxed);
         }
     }
     #[inline]
     pub unsafe fn try_read(&self) -> bool {
         let r = libc::pthread_rwlock_tryrdlock(self.inner.get());
-        if r == 0 && *self.write_locked.get() {
-            self.raw_unlock();
-            false
+        if r == 0 {
+            if *self.write_locked.get() {
+                self.raw_unlock();
+                false
+            } else {
+                self.num_readers.fetch_add(1, Ordering::Relaxed);
+                true
+            }
         } else {
-            r == 0
+            false
         }
     }
     #[inline]
     pub unsafe fn write(&self) {
         let r = libc::pthread_rwlock_wrlock(self.inner.get());
-        // see comments above for why we check for EDEADLK and write_locked
-        if r == libc::EDEADLK || *self.write_locked.get() {
+        // See comments above for why we check for EDEADLK and write_locked. We
+        // also need to check that num_readers is 0.
+        if r == libc::EDEADLK || *self.write_locked.get() ||
+           self.num_readers.load(Ordering::Relaxed) != 0 {
             if r == 0 {
                 self.raw_unlock();
             }
@@ -83,12 +94,14 @@ impl RWLock {
     #[inline]
     pub unsafe fn try_write(&self) -> bool {
         let r = libc::pthread_rwlock_trywrlock(self.inner.get());
-        if r == 0 && *self.write_locked.get() {
-            self.raw_unlock();
-            false
-        } else if r == 0 {
-            *self.write_locked.get() = true;
-            true
+        if r == 0 {
+            if *self.write_locked.get() || self.num_readers.load(Ordering::Relaxed) != 0 {
+                self.raw_unlock();
+                false
+            } else {
+                *self.write_locked.get() = true;
+                true
+            }
         } else {
             false
         }
@@ -101,10 +114,12 @@ impl RWLock {
     #[inline]
     pub unsafe fn read_unlock(&self) {
         debug_assert!(!*self.write_locked.get());
+        self.num_readers.fetch_sub(1, Ordering::Relaxed);
         self.raw_unlock();
     }
     #[inline]
     pub unsafe fn write_unlock(&self) {
+        debug_assert_eq!(self.num_readers.load(Ordering::Relaxed), 0);
         debug_assert!(*self.write_locked.get());
         *self.write_locked.get() = false;
         self.raw_unlock();
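
For context, not part of the commit: POSIX leaves the result of `pthread_rwlock_wrlock` unspecified when the calling thread already holds a read lock; some platforms return `EDEADLK`, but others simply grant the write lock, which would let safe Rust code observe a shared and an exclusive borrow of the same data at once. The new `num_readers` counter makes that case detectable. A minimal usage sketch, assuming the constructor shown in the diff is exposed as `RWLock::new()`:

```rust
// Hypothetical sketch of the misuse the patched write() now catches.
let lock = RWLock::new();
unsafe {
    lock.read();        // rdlock succeeds; num_readers becomes 1
    // lock.write();    // on a platform where wrlock would succeed recursively
                        // instead of returning EDEADLK, num_readers != 0 now
                        // turns this into a deterministic panic rather than
                        // silently granting a writer alongside an active reader
    lock.read_unlock(); // num_readers back to 0
    lock.write();       // fine: no readers and not write-locked
    lock.write_unlock();
}
```

`Ordering::Relaxed` appears sufficient here because the counter is only updated while the pthread lock is held and only consulted after a lock operation returns, so the lock itself supplies the needed synchronization.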