lean C++ library
A lean C++ library providing efficient utility classes for high-performance C++ applications.
shareable_spin_lock.h
00001 /*****************************************************/
00002 /* lean Concurrent              (c) Tobias Zirr 2011 */
00003 /*****************************************************/
00004 
00005 #ifndef LEAN_CONCURRENT_SHAREABLE_SPINLOCK
00006 #define LEAN_CONCURRENT_SHAREABLE_SPINLOCK
00007 
00008 #include "../lean.h"
00009 #include "../tags/noncopyable.h"
00010 #include "atomic.h"
00011 
00012 // Include automatically to encourage use of scoped_lock
00013 #include "shareable_lock_policies.h"
00014 #include "../smart/scoped_lock.h"
00015 
00016 namespace lean
00017 {
00018 namespace concurrent
00019 {
00020 
00022 template <class Counter = long>
00023 class shareable_spin_lock : public noncopyable
00024 {
00025 private:
00026     Counter m_counter;
00027     Counter m_exclCounter;
00028 
00029 public:
00031     shareable_spin_lock()
00032         : m_counter(0),
00033         m_exclCounter(0) {  }
00034 
00036     LEAN_INLINE bool try_lock()
00037     {
00038         return atomic_test_and_set(m_counter, static_cast<Counter>(0), static_cast<Counter>(-1));
00039     }
00040 
00043     LEAN_INLINE bool try_upgrade_lock()
00044     {
00045         return atomic_test_and_set(m_counter, static_cast<Counter>(1), static_cast<Counter>(-1));
00046     }
00047 
00049     LEAN_INLINE void lock()
00050     {
00051         atomic_increment(m_exclCounter);
00052         while (!try_lock());
00053         atomic_decrement(m_exclCounter);
00054     }
00055 
00058     LEAN_INLINE void upgrade_lock()
00059     {
00060         atomic_increment(m_exclCounter);
00061         
00062         // Unlock required, otherwise multiple upgrade
00063         // calls at the same time will lead to deadlocks
00064         unlock_shared();
00065         while (!try_lock());
00066         
00067         atomic_decrement(m_exclCounter);
00068     }
00069 
00071     LEAN_INLINE void downgrade_lock()
00072     {
00073         atomic_test_and_set(m_counter, static_cast<Counter>(-1), static_cast<Counter>(1));
00074     }
00075 
00077     LEAN_INLINE void unlock()
00078     {
00079         atomic_test_and_set(m_counter, static_cast<Counter>(-1), static_cast<Counter>(0));
00080     }
00081 
00083     LEAN_INLINE bool try_lock_shared()
00084     {
00085         while (static_cast<volatile Counter&>(m_exclCounter) == static_cast<Counter>(0))
00086         {
00087             Counter counter = static_cast<volatile Counter&>(m_counter);
00088 
00089             if (counter == static_cast<Counter>(-1))
00090                 return false;
00091             else if (atomic_test_and_set(m_counter, counter, static_cast<Counter>(counter + 1)))
00092                 return true;
00093         }
00094 
00095         return false;
00096     }
00097 
00099     LEAN_INLINE void lock_shared()
00100     {
00101         while (!try_lock_shared());
00102     }
00103 
00105     LEAN_INLINE void unlock_shared()
00106     {
00107         Counter counter;
00108 
00109         do
00110         {
00111             counter = static_cast<volatile Counter&>(m_counter);
00112         }
00113         while (counter != static_cast<Counter>(-1) && counter != static_cast<Counter>(0) &&
00114             atomic_test_and_set(m_counter, counter, static_cast<Counter>(counter - 1)) );
00115     }
00116 };
00117 
/// Scoped lock that holds a shareable spin lock exclusively for its lifetime.
typedef smart::scoped_lock< shareable_spin_lock<> > scoped_ssl_lock;
/// Scoped lock that holds a shareable spin lock shared for its lifetime.
typedef smart::scoped_lock< shareable_spin_lock<>, shared_lock_policy< shareable_spin_lock<> > > scoped_ssl_lock_shared;
/// Scoped lock that upgrades a shared spin lock to an exclusive one for its lifetime.
typedef smart::scoped_lock< shareable_spin_lock<>, upgrade_lock_policy< shareable_spin_lock<> > > scoped_ssl_upgrade_lock;
00124 
00125 } // namespace
00126 
00127 using concurrent::shareable_spin_lock;
00128 
00129 using concurrent::scoped_ssl_lock;
00130 using concurrent::scoped_ssl_lock_shared;
00131 using concurrent::scoped_ssl_upgrade_lock;
00132 
00133 } // namespace
00134 
00135 #endif