Basic thread synchronization in C++

This code is from chapter 3 of C++ Concurrency in Action by Anthony Williams and can be downloaded directly from the book's companion site. I made some minor changes and compiled it in Visual Studio 2013. Making the code available on MSDN may help many who are learning multithreading in C++.

The solution has 13 projects showing different examples of how to do synchronization and how not to do it. Please find the detailed explanations in the book; here's a brief overview. To keep the snippets short, most of them omit the #include lines and the using namespace std that the projects rely on.

We start with the simplest mechanism, a lock_guard, to ensure that only one thread has access to a list at a time.

#include <algorithm>
#include <list>
#include <mutex>

using namespace std;

list<int> m_list;
mutex     m_mutex;

void add_to_list(int value) 
{ 
    lock_guard<mutex> guard(m_mutex); 
    m_list.push_back(value); 
} 

bool list_contains(int value) 
{ 
    lock_guard<mutex> guard(m_mutex); 
    return (find(m_list.begin(), m_list.end(), value) != m_list.end()); 
} 

int main() 
{ 
    add_to_list(42); 
}
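To see it in action, here is a minimal sketch of my own (exercise_list is not part of the book's sample, and it additionally needs <thread>) that hammers both functions from two threads:

void exercise_list()
{
    // Both threads contend on m_mutex, so neither can ever observe the
    // list in a half-updated state.
    thread writer([] { for (int i = 0; i < 100; ++i) add_to_list(i); });
    thread reader([] { for (int i = 0; i < 100; ++i) list_contains(i); });
    writer.join();
    reader.join();
}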
The second example shows the wrong way to do synchronization: handing out a pointer or reference to the protected data lets callers bypass the mutex entirely and introduces those nasty run-time bugs.
class some_data 
{ 
    int        a; 
    string    b; 

public: 
    void do_something() {} 
}; 

class data_wrapper 
{ 
private: 
    some_data    data; 
    mutex        m; 

public: 
    template<typename F> 
    void process_data(F func) 
    { 
        lock_guard<mutex> l(m); 
        func(data); 
    } 
}; 

some_data*        m_unprotected; 
data_wrapper    x; 

void malicious_function(some_data& protected_data) 
{ 
    m_unprotected = &protected_data; 
} 

void foo() 
{ 
    x.process_data(malicious_function); 
    m_unprotected->do_something(); 
} 

int main() 
{ 
    foo(); 
}
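By contrast, a well-behaved functor does all of its work inside process_data, while the mutex is held, and stashes nothing away. (well_behaved_function and safe_foo below are my own illustration, not part of the sample.)

void well_behaved_function(some_data& protected_data)
{
    // All work on the protected data happens while process_data holds the
    // mutex; no pointer or reference escapes the lock.
    protected_data.do_something();
}

void safe_foo()
{
    x.process_data(well_behaved_function);
}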
The fifth example wraps a std::stack<T> in a class that protects every operation with a mutex.
struct empty_stack : exception 
{ 
    const char* what() const throw() 
    { 
        return "empty stack"; 
    } 
}; 

template <typename T> 
class threadsafe_stack 
{ 
public: 
    threadsafe_stack() 
    { 
    } 

    threadsafe_stack(const threadsafe_stack& other) 
    { 
        lock_guard<mutex> lock(other.m); 
        data = other.data; 
    } 

    threadsafe_stack& operator=(const threadsafe_stack&) = delete; 

    void push(T value) 
    { 
        lock_guard<mutex> lock(m); 
        data.push(value); 
    } 

    shared_ptr<T> pop() 
    { 
        lock_guard<mutex> lock(m); 

        if (data.empty()) 
        { 
            throw empty_stack(); 
        } 

        shared_ptr<T> const res(make_shared<T>(data.top())); 
        data.pop(); 
        return res; 
    } 

    void pop(T& value) 
    { 
        lock_guard<mutex> lock(m); 

        if (data.empty()) 
        { 
            throw empty_stack(); 
        } 

        value = data.top(); 
        data.pop(); 
    } 

    bool empty() const 
    { 
        lock_guard<mutex> lock(m); 
        return data.empty(); 
    } 

private: 
    stack<T> data; 
    mutable mutex m; 
}; 

int main() 
{ 
    threadsafe_stack<int> si; 
    si.push(5); 
    si.pop(); 

    if (!si.empty()) 
    { 
        int x; 
        si.pop(x); 
    } 
}
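In a single-threaded main this is harmless, but between threads the combined pop() overloads are the whole point: checking for emptiness and removing the top element happen under one lock, whereas a separate empty()/top()/pop() sequence could be interleaved by another thread. A rough consumer sketch of my own (consume is not part of the sample):

void consume(threadsafe_stack<int>& s)
{
    try
    {
        // pop() tests for emptiness and removes the element under the same
        // lock, so two consumers can never pop the same value.
        for (;;)
        {
            shared_ptr<int> value = s.pop();
        }
    }
    catch (empty_stack const&)
    {
        // The stack was empty when we tried to pop.
    }
}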
Example six demonstrates locking two mutexes safely during a swap operation: std::lock acquires both mutexes together, so no matter in which order the parameters are passed to swap, there can never be a deadlock.
class some_big_object 
{ 
}; 

void swap(some_big_object& lhs, some_big_object& rhs) 
{ 
} 

class X 
{ 
public: 
    X(some_big_object const& sd) 
        : data(sd) 
    { 
    } 

    friend void swap(X& lhs, X& rhs) 
    { 
        if (&lhs == &rhs) 
        { 
            return; 
        } 

        lock(lhs.m, rhs.m); 
        lock_guard<mutex> a(lhs.m, adopt_lock); 
        lock_guard<mutex> b(rhs.m, adopt_lock); 

        swap(lhs.data, rhs.data); 
    } 

private: 
    some_big_object data; 
    mutable mutex   m; 
};
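The book also discusses a variant based on unique_lock; as a rough sketch (swap_deferred is my own name, and it would sit inside class X alongside swap), defer_lock constructs the guards without locking and lets lock() acquire both at once:

    friend void swap_deferred(X& lhs, X& rhs)
    {
        if (&lhs == &rhs)
        {
            return;
        }

        // defer_lock leaves both mutexes unlocked; lock() then acquires
        // them together with the same deadlock-avoidance guarantee.
        unique_lock<mutex> a(lhs.m, defer_lock);
        unique_lock<mutex> b(rhs.m, defer_lock);
        lock(a, b);

        swap(lhs.data, rhs.data);
    }

unique_lock is a little heavier than lock_guard, but it can be unlocked early or have its ownership transferred, which is sometimes worth the cost.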
Example seven shows how to specify a mutex hierarchy manually; assigning numeric levels to the mutexes looks a bit like numbering lines in an old BASIC program… The class is stubbed out here; a fuller sketch follows the example.
class hierarchical_mutex 
{ 
public: 
    explicit hierarchical_mutex(unsigned level) 
    { 
    } 

    void lock() 
    { 
    } 

    void unlock() 
    { 
    } 
}; 

hierarchical_mutex high_level_mutex(10000); 
hierarchical_mutex low_level_mutex(5000); 
hierarchical_mutex other_mutex(100);

// Stubs so the excerpt is self-contained:
int  do_low_level_stuff()     { return 0; }
void do_high_level_stuff(int) { }
void do_other_stuff()         { }

int low_level_func() 
{ 
    lock_guard<hierarchical_mutex> l(low_level_mutex); 
    return do_low_level_stuff(); 
} 

void high_level_func() 
{ 
    lock_guard<hierarchical_mutex> l(high_level_mutex); 
    do_high_level_stuff(low_level_func()); 
} 

void other_stuff_func() 
{ 
    high_level_func(); 
    do_other_stuff(); 
}
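The hierarchical_mutex above is only a stub; here is a sketch of what a working version might look like, roughly along the lines of the book's full listing (hierarchical_mutex_sketch is my own name, and it needs <stdexcept>, <climits> and a compiler with thread_local support, which Visual C++ gained only after 2013):

class hierarchical_mutex_sketch
{
public:
    explicit hierarchical_mutex_sketch(unsigned long value)
        : hierarchy_value(value)
        , previous_hierarchy_value(0)
    {
    }

    void lock()
    {
        check_for_hierarchy_violation();
        internal_mutex.lock();
        update_hierarchy_value();
    }

    void unlock()
    {
        this_thread_hierarchy_value = previous_hierarchy_value;
        internal_mutex.unlock();
    }

private:
    void check_for_hierarchy_violation()
    {
        // A thread may only lock mutexes with a strictly lower level than
        // the lowest one it already holds.
        if (this_thread_hierarchy_value <= hierarchy_value)
        {
            throw logic_error("mutex hierarchy violated");
        }
    }

    void update_hierarchy_value()
    {
        previous_hierarchy_value = this_thread_hierarchy_value;
        this_thread_hierarchy_value = hierarchy_value;
    }

    mutex internal_mutex;
    unsigned long const hierarchy_value;
    unsigned long previous_hierarchy_value;
    static thread_local unsigned long this_thread_hierarchy_value;
};

thread_local unsigned long hierarchical_mutex_sketch::this_thread_hierarchy_value(ULONG_MAX);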
Example 11 demonstrates the singleton pattern (avoid it unless necessary) and thread-safe lazy initialization guarded by a mutex.
struct some_resource 
{ 
    void do_something() 
    { 
    } 
}; 

shared_ptr<some_resource> resource; 
mutex m; 

void foo() 
{ 
    unique_lock<mutex> l(m); 

    if (resource == nullptr) 
    { 
        resource.reset(new some_resource); 
    } 

    l.unlock(); 
    resource->do_something(); 
} 

int main() 
{ 
    foo(); 
}
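The drawback is that every caller of foo() still takes the mutex just to discover that the resource already exists. The same lazy initialization can be written with call_once; a small sketch of my own (init_resource and foo_once are my names), reusing the resource and headers above:

once_flag resource_flag;

void init_resource()
{
    resource.reset(new some_resource);
}

void foo_once()
{
    // Only the first call pays for initialization; concurrent callers wait
    // until init_resource has finished and then skip it.
    call_once(resource_flag, init_resource);
    resource->do_something();
}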
The next sample uses call_once and a once_flag as class members, so that the connection is opened exactly once no matter which thread touches the object first.
using std::call_once; 
using std::once_flag; 

struct connection_info 
{ 
}; 

struct data_packet 
{ 
}; 

struct connection_handle 
{ 
    void send_data(data_packet const&) 
    { 
    } 

    data_packet receive_data() 
    { 
        return data_packet(); 
    } 
}; 

struct remote_connection_manager 
{ 
    connection_handle open(connection_info const&) 
    { 
        return connection_handle(); 
    } 
} connection_manager; 

class X 
{ 
public: 
    X(connection_info const& ci) 
        : info(ci) 
    { 
    } 

    void send_data(data_packet const& data) 
    { 
        call_once(init_flag, &X::open_connection, this); 
        handle.send_data(data); 
    } 

    data_packet receive_data() 
    { 
        call_once(init_flag, &X::open_connection, this); 
        return handle.receive_data(); 
    } 

private: 
    void open_connection() 
    { 
        handle = connection_manager.open(info); 
    } 

private: 
    connection_info info; 
    connection_handle handle; 
    once_flag init_flag; 
};
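A usage sketch of my own (assuming <thread> and the types above): whichever thread reaches call_once first opens the connection; the other call blocks until open_connection has finished and then simply reuses the handle.

int main()
{
    X connection{ connection_info() };

    thread t1([&] { connection.send_data(data_packet()); });
    thread t2([&] { connection.receive_data(); });

    t1.join();
    t2.join();
}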
The final example demonstrates how to use the Boost shared_mutex: readers take a shared_lock, so many of them can read the data concurrently, while writers take a plain lock_guard for exclusive access.
class dns_entry 
{ 
}; 

class dns_cache 
{ 
public: 
    dns_entry find_entry(string const& domain) 
    { 
        shared_lock<shared_mutex> l(m); 
        auto it = entries.find(domain); 
        return (it == entries.end()) ? dns_entry() : it->second; 
    } 

    void update_or_add_entry(string const& domain, dns_entry const& info) 
    { 
        lock_guard<shared_mutex> l(m); 
        entries[domain] = info; 
    } 

private: 
    map<string, dns_entry> entries; 
    shared_mutex m; 
};
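One more usage sketch of my own (assuming <thread>, <map>, <string> and Boost's shared_mutex as above): the two readers may run find_entry at the same time, while update_or_add_entry has to wait for exclusive access.

int main()
{
    dns_cache cache;

    thread writer([&] { cache.update_or_add_entry("example.com", dns_entry()); });
    thread reader1([&] { cache.find_entry("example.com"); });
    thread reader2([&] { cache.find_entry("example.com"); });

    writer.join();
    reader1.join();
    reader2.join();
}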

Code is available here.
