//
// kqueue_reactor.hpp
// ~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2008 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Copyright (c) 2005 Stefan Arentz (stefan at soze dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

#ifndef BOOST_ASIO_DETAIL_KQUEUE_REACTOR_HPP
#define BOOST_ASIO_DETAIL_KQUEUE_REACTOR_HPP

#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)

#include <boost/asio/detail/push_options.hpp>

#include <boost/asio/detail/kqueue_reactor_fwd.hpp>

#if defined(BOOST_ASIO_HAS_KQUEUE)

#include <boost/asio/detail/push_options.hpp>
#include <cstddef>
#include <vector>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <boost/config.hpp>
#include <boost/date_time/posix_time/posix_time_types.hpp>
#include <boost/throw_exception.hpp>
#include <boost/system/system_error.hpp>
#include <boost/asio/detail/pop_options.hpp>

#include <boost/asio/error.hpp>
#include <boost/asio/io_service.hpp>
#include <boost/asio/detail/bind_handler.hpp>
#include <boost/asio/detail/mutex.hpp>
#include <boost/asio/detail/task_io_service.hpp>
#include <boost/asio/detail/thread.hpp>
#include <boost/asio/detail/reactor_op_queue.hpp>
#include <boost/asio/detail/select_interrupter.hpp>
#include <boost/asio/detail/service_base.hpp>
#include <boost/asio/detail/signal_blocker.hpp>
#include <boost/asio/detail/socket_types.hpp>
#include <boost/asio/detail/timer_queue.hpp>
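
// The kqueue_reactor is the reactor implementation used on BSD-derived
// systems (Mac OS X, FreeBSD, etc.) that provide the kqueue/kevent event
// notification interface. Pending handlers are queued per descriptor in
// reactor_op_queue objects and dispatched from run(), which is normally
// driven by task_io_service; when the Own_Thread template parameter is true
// the reactor instead runs its own internal thread.
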
// Older versions of Mac OS X may not define EV_OOBAND.
#if !defined(EV_OOBAND)
# define EV_OOBAND EV_FLAG1
#endif // !defined(EV_OOBAND)

namespace boost {
namespace asio {
namespace detail {

template <bool Own_Thread>
class kqueue_reactor
  : public boost::asio::detail::service_base<kqueue_reactor<Own_Thread> >
{
public:
  // Constructor.
  kqueue_reactor(boost::asio::io_service& io_service)
    : boost::asio::detail::service_base<
        kqueue_reactor<Own_Thread> >(io_service),
      mutex_(),
      kqueue_fd_(do_kqueue_create()),
      wait_in_progress_(false),
      interrupter_(),
      read_op_queue_(),
      write_op_queue_(),
      except_op_queue_(),
      pending_cancellations_(),
      stop_thread_(false),
      thread_(0),
      shutdown_(false),
      need_kqueue_wait_(true)
  {
    // Start the reactor's internal thread only if needed.
    if (Own_Thread)
    {
      boost::asio::detail::signal_blocker sb;
      thread_ = new boost::asio::detail::thread(
          bind_handler(&kqueue_reactor::call_run_thread, this));
    }

    // Add the interrupter's descriptor to the kqueue.
    struct kevent event;
    EV_SET(&event, interrupter_.read_descriptor(),
        EVFILT_READ, EV_ADD, 0, 0, 0);
    ::kevent(kqueue_fd_, &event, 1, 0, 0, 0);
  }

  // Destructor.
  ~kqueue_reactor()
  {
    shutdown_service();
    close(kqueue_fd_);
  }

  // Destroy all user-defined handler objects owned by the service.
  void shutdown_service()
  {
    boost::asio::detail::mutex::scoped_lock lock(mutex_);
    shutdown_ = true;
    stop_thread_ = true;
    lock.unlock();

    if (thread_)
    {
      interrupter_.interrupt();
      thread_->join();
      delete thread_;
      thread_ = 0;
    }

    read_op_queue_.destroy_operations();
    write_op_queue_.destroy_operations();
    except_op_queue_.destroy_operations();

    for (std::size_t i = 0; i < timer_queues_.size(); ++i)
      timer_queues_[i]->destroy_timers();
    timer_queues_.clear();
  }

  // Register a socket with the reactor. Returns 0 on success, system error
  // code on failure.
  int register_descriptor(socket_type)
  {
    return 0;
  }

  // Start a new read operation. The handler object will be invoked when the
  // given descriptor is ready to be read, or an error has occurred.
  template <typename Handler>
  void start_read_op(socket_type descriptor, Handler handler)
  {
    boost::asio::detail::mutex::scoped_lock lock(mutex_);

    if (shutdown_)
      return;

    if (!read_op_queue_.has_operation(descriptor))
      if (handler(boost::system::error_code()))
        return;

    if (read_op_queue_.enqueue_operation(descriptor, handler))
    {
      struct kevent event;
      EV_SET(&event, descriptor, EVFILT_READ, EV_ADD, 0, 0, 0);
      if (::kevent(kqueue_fd_, &event, 1, 0, 0, 0) == -1)
      {
        boost::system::error_code ec(errno,
            boost::asio::error::get_system_category());
        read_op_queue_.dispatch_all_operations(descriptor, ec);
      }
    }
  }

  // Start a new write operation. The handler object will be invoked when the
  // given descriptor is ready to be written, or an error has occurred.
  template <typename Handler>
  void start_write_op(socket_type descriptor, Handler handler)
  {
    boost::asio::detail::mutex::scoped_lock lock(mutex_);

    if (shutdown_)
      return;

    if (!write_op_queue_.has_operation(descriptor))
      if (handler(boost::system::error_code()))
        return;

    if (write_op_queue_.enqueue_operation(descriptor, handler))
    {
      struct kevent event;
      EV_SET(&event, descriptor, EVFILT_WRITE, EV_ADD, 0, 0, 0);
      if (::kevent(kqueue_fd_, &event, 1, 0, 0, 0) == -1)
      {
        boost::system::error_code ec(errno,
            boost::asio::error::get_system_category());
        write_op_queue_.dispatch_all_operations(descriptor, ec);
      }
    }
  }

  // Start a new exception operation. The handler object will be invoked when
  // the given descriptor has exception information, or an error has occurred.
  template <typename Handler>
  void start_except_op(socket_type descriptor, Handler handler)
  {
    boost::asio::detail::mutex::scoped_lock lock(mutex_);

    if (shutdown_)
      return;

    if (except_op_queue_.enqueue_operation(descriptor, handler))
    {
      struct kevent event;
      if (read_op_queue_.has_operation(descriptor))
        EV_SET(&event, descriptor, EVFILT_READ, EV_ADD, 0, 0, 0);
      else
        EV_SET(&event, descriptor, EVFILT_READ, EV_ADD, EV_OOBAND, 0, 0);
      if (::kevent(kqueue_fd_, &event, 1, 0, 0, 0) == -1)
      {
        boost::system::error_code ec(errno,
            boost::asio::error::get_system_category());
        except_op_queue_.dispatch_all_operations(descriptor, ec);
      }
    }
  }

  // Start new write and exception operations. The handler object will be
  // invoked when the given descriptor is ready for writing or has exception
  // information available, or an error has occurred.
  template <typename Handler>
  void start_write_and_except_ops(socket_type descriptor, Handler handler)
  {
    boost::asio::detail::mutex::scoped_lock lock(mutex_);

    if (shutdown_)
      return;

    if (write_op_queue_.enqueue_operation(descriptor, handler))
    {
      struct kevent event;
      EV_SET(&event, descriptor, EVFILT_WRITE, EV_ADD, 0, 0, 0);
      if (::kevent(kqueue_fd_, &event, 1, 0, 0, 0) == -1)
      {
        boost::system::error_code ec(errno,
            boost::asio::error::get_system_category());
        write_op_queue_.dispatch_all_operations(descriptor, ec);
      }
    }

    if (except_op_queue_.enqueue_operation(descriptor, handler))
    {
      struct kevent event;
      if (read_op_queue_.has_operation(descriptor))
        EV_SET(&event, descriptor, EVFILT_READ, EV_ADD, 0, 0, 0);
      else
        EV_SET(&event, descriptor, EVFILT_READ, EV_ADD, EV_OOBAND, 0, 0);
      if (::kevent(kqueue_fd_, &event, 1, 0, 0, 0) == -1)
      {
        boost::system::error_code ec(errno,
            boost::asio::error::get_system_category());
        except_op_queue_.dispatch_all_operations(descriptor, ec);
        write_op_queue_.dispatch_all_operations(descriptor, ec);
      }
    }
  }

  // Cancel all operations associated with the given descriptor. The
  // handlers associated with the descriptor will be invoked with the
  // operation_aborted error.
  void cancel_ops(socket_type descriptor)
  {
    boost::asio::detail::mutex::scoped_lock lock(mutex_);
    cancel_ops_unlocked(descriptor);
  }

  // Enqueue cancellation of all operations associated with the given
  // descriptor. The handlers associated with the descriptor will be invoked
  // with the operation_aborted error. This function does not acquire the
  // kqueue_reactor's mutex, and so should only be used from within a reactor
  // handler.
  void enqueue_cancel_ops_unlocked(socket_type descriptor)
  {
    pending_cancellations_.push_back(descriptor);
  }

  // Cancel any operations that are running against the descriptor and remove
  // its registration from the reactor.
  void close_descriptor(socket_type descriptor)
  {
    boost::asio::detail::mutex::scoped_lock lock(mutex_);

    // Remove the descriptor from kqueue.
    struct kevent event[2];
    EV_SET(&event[0], descriptor, EVFILT_READ, EV_DELETE, 0, 0, 0);
    EV_SET(&event[1], descriptor, EVFILT_WRITE, EV_DELETE, 0, 0, 0);
    ::kevent(kqueue_fd_, event, 2, 0, 0, 0);

    // Cancel any outstanding operations associated with the descriptor.
    cancel_ops_unlocked(descriptor);
  }

  // Add a new timer queue to the reactor.
  template <typename Time_Traits>
  void add_timer_queue(timer_queue<Time_Traits>& timer_queue)
  {
    boost::asio::detail::mutex::scoped_lock lock(mutex_);
    timer_queues_.push_back(&timer_queue);
  }

  // Remove a timer queue from the reactor.
  template <typename Time_Traits>
  void remove_timer_queue(timer_queue<Time_Traits>& timer_queue)
  {
    boost::asio::detail::mutex::scoped_lock lock(mutex_);
    for (std::size_t i = 0; i < timer_queues_.size(); ++i)
    {
      if (timer_queues_[i] == &timer_queue)
      {
        timer_queues_.erase(timer_queues_.begin() + i);
        return;
      }
    }
  }

  // Schedule a timer in the given timer queue to expire at the specified
  // absolute time. The handler object will be invoked when the timer expires.
  template <typename Time_Traits, typename Handler>
  void schedule_timer(timer_queue<Time_Traits>& timer_queue,
      const typename Time_Traits::time_type& time, Handler handler, void* token)
  {
    boost::asio::detail::mutex::scoped_lock lock(mutex_);
    if (!shutdown_)
      if (timer_queue.enqueue_timer(time, handler, token))
        interrupter_.interrupt();
  }

  // Cancel the timer associated with the given token. Returns the number of
  // handlers that have been posted or dispatched.
  template <typename Time_Traits>
  std::size_t cancel_timer(timer_queue<Time_Traits>& timer_queue, void* token)
  {
    boost::asio::detail::mutex::scoped_lock lock(mutex_);
    std::size_t n = timer_queue.cancel_timer(token);
    if (n > 0)
      interrupter_.interrupt();
    return n;
  }

private:
  friend class task_io_service<kqueue_reactor<Own_Thread> >;
  // Run the kqueue loop.
  void run(bool block)
  {
    boost::asio::detail::mutex::scoped_lock lock(mutex_);

    // Dispatch any operation cancellations that were made while the select
    // loop was not running.
    read_op_queue_.dispatch_cancellations();
    write_op_queue_.dispatch_cancellations();
    except_op_queue_.dispatch_cancellations();
    for (std::size_t i = 0; i < timer_queues_.size(); ++i)
      timer_queues_[i]->dispatch_cancellations();

    // Check if the thread is supposed to stop.
    if (stop_thread_)
    {
      cleanup_operations_and_timers(lock);
      return;
    }

    // We can return immediately if there's no work to do and the reactor is
    // not supposed to block.
    if (!block && read_op_queue_.empty() && write_op_queue_.empty()
        && except_op_queue_.empty() && all_timer_queues_are_empty())
    {
      cleanup_operations_and_timers(lock);
      return;
    }

    // Determine how long to block while waiting for events.
    timespec timeout_buf = { 0, 0 };
    timespec* timeout = block ? get_timeout(timeout_buf) : &timeout_buf;

    wait_in_progress_ = true;
    lock.unlock();

    // Block on the kqueue descriptor.
    struct kevent events[128];
    int num_events = (block || need_kqueue_wait_)
      ? kevent(kqueue_fd_, 0, 0, events, 128, timeout)
      : 0;

    lock.lock();
    wait_in_progress_ = false;

    // Block signals while dispatching operations.
    boost::asio::detail::signal_blocker sb;

    // Dispatch the waiting events.
    for (int i = 0; i < num_events; ++i)
    {
      int descriptor = events[i].ident;
      if (descriptor == interrupter_.read_descriptor())
      {
        interrupter_.reset();
      }
      else if (events[i].filter == EVFILT_READ)
      {
        // Dispatch operations associated with the descriptor.
        bool more_reads = false;
        bool more_except = false;
        if (events[i].flags & EV_ERROR)
        {
          boost::system::error_code error(
              events[i].data, boost::asio::error::get_system_category());
          except_op_queue_.dispatch_all_operations(descriptor, error);
          read_op_queue_.dispatch_all_operations(descriptor, error);
        }
        else if (events[i].flags & EV_OOBAND)
        {
          boost::system::error_code error;
          more_except = except_op_queue_.dispatch_operation(descriptor, error);
          if (events[i].data > 0)
            more_reads = read_op_queue_.dispatch_operation(descriptor, error);
          else
            more_reads = read_op_queue_.has_operation(descriptor);
        }
        else
        {
          boost::system::error_code error;
          more_reads = read_op_queue_.dispatch_operation(descriptor, error);
          more_except = except_op_queue_.has_operation(descriptor);
        }

        // Update the descriptor in the kqueue.
        struct kevent event;
        if (more_reads)
          EV_SET(&event, descriptor, EVFILT_READ, EV_ADD, 0, 0, 0);
        else if (more_except)
          EV_SET(&event, descriptor, EVFILT_READ, EV_ADD, EV_OOBAND, 0, 0);
        else
          EV_SET(&event, descriptor, EVFILT_READ, EV_DELETE, 0, 0, 0);
        if (::kevent(kqueue_fd_, &event, 1, 0, 0, 0) == -1)
        {
          boost::system::error_code error(errno,
              boost::asio::error::get_system_category());
          except_op_queue_.dispatch_all_operations(descriptor, error);
          read_op_queue_.dispatch_all_operations(descriptor, error);
        }
      }
      else if (events[i].filter == EVFILT_WRITE)
      {
        // Dispatch operations associated with the descriptor.
        bool more_writes = false;
        if (events[i].flags & EV_ERROR)
        {
          boost::system::error_code error(
              events[i].data, boost::asio::error::get_system_category());
          write_op_queue_.dispatch_all_operations(descriptor, error);
        }
        else
        {
          boost::system::error_code error;
          more_writes = write_op_queue_.dispatch_operation(descriptor, error);
        }

        // Update the descriptor in the kqueue.
        struct kevent event;
        if (more_writes)
          EV_SET(&event, descriptor, EVFILT_WRITE, EV_ADD, 0, 0, 0);
        else
          EV_SET(&event, descriptor, EVFILT_WRITE, EV_DELETE, 0, 0, 0);
        if (::kevent(kqueue_fd_, &event, 1, 0, 0, 0) == -1)
        {
          boost::system::error_code error(errno,
              boost::asio::error::get_system_category());
          write_op_queue_.dispatch_all_operations(descriptor, error);
        }
      }
    }

    read_op_queue_.dispatch_cancellations();
    write_op_queue_.dispatch_cancellations();
    except_op_queue_.dispatch_cancellations();
    for (std::size_t i = 0; i < timer_queues_.size(); ++i)
    {
      timer_queues_[i]->dispatch_timers();
      timer_queues_[i]->dispatch_cancellations();
    }

    // Issue any pending cancellations.
    for (std::size_t i = 0; i < pending_cancellations_.size(); ++i)
      cancel_ops_unlocked(pending_cancellations_[i]);
    pending_cancellations_.clear();

    // Determine whether kqueue needs to be called next time the reactor is run.
    need_kqueue_wait_ = !read_op_queue_.empty()
      || !write_op_queue_.empty() || !except_op_queue_.empty();

    cleanup_operations_and_timers(lock);
  }

  // Run the select loop in the thread.
  void run_thread()
  {
    boost::asio::detail::mutex::scoped_lock lock(mutex_);
    while (!stop_thread_)
    {
      lock.unlock();
      run(true);
      lock.lock();
    }
  }

  // Entry point for the select loop thread.
  static void call_run_thread(kqueue_reactor* reactor)
  {
    reactor->run_thread();
  }

  // Interrupt the select loop.
  void interrupt()
  {
    interrupter_.interrupt();
  }

  // Create the kqueue file descriptor. Throws an exception if the descriptor
  // cannot be created.
  static int do_kqueue_create()
  {
    int fd = kqueue();
    if (fd == -1)
    {
      boost::throw_exception(
          boost::system::system_error(
            boost::system::error_code(errno,
              boost::asio::error::get_system_category()),
            "kqueue"));
    }
    return fd;
  }

  // Check if all timer queues are empty.
  bool all_timer_queues_are_empty() const
  {
    for (std::size_t i = 0; i < timer_queues_.size(); ++i)
      if (!timer_queues_[i]->empty())
        return false;
    return true;
  }

  // Get the timeout value for the kevent call.
  timespec* get_timeout(timespec& ts)
  {
    if (all_timer_queues_are_empty())
      return 0;

    // By default we will wait no longer than 5 minutes. This will ensure that
    // any changes to the system clock are detected after no longer than this.
    boost::posix_time::time_duration minimum_wait_duration
      = boost::posix_time::minutes(5);

    for (std::size_t i = 0; i < timer_queues_.size(); ++i)
    {
      boost::posix_time::time_duration wait_duration
        = timer_queues_[i]->wait_duration();
      if (wait_duration < minimum_wait_duration)
        minimum_wait_duration = wait_duration;
    }

    if (minimum_wait_duration > boost::posix_time::time_duration())
    {
      ts.tv_sec = minimum_wait_duration.total_seconds();
      ts.tv_nsec = minimum_wait_duration.total_nanoseconds() % 1000000000;
    }
    else
    {
      ts.tv_sec = 0;
      ts.tv_nsec = 0;
    }

    return &ts;
  }

  // Cancel all operations associated with the given descriptor. The do_cancel
  // function of the handler objects will be invoked. This function does not
  // acquire the kqueue_reactor's mutex.
  void cancel_ops_unlocked(socket_type descriptor)
  {
    bool interrupt = read_op_queue_.cancel_operations(descriptor);
    interrupt = write_op_queue_.cancel_operations(descriptor) || interrupt;
    interrupt = except_op_queue_.cancel_operations(descriptor) || interrupt;
    if (interrupt)
      interrupter_.interrupt();
  }

  // Clean up operations and timers. We must not hold the lock since the
  // destructors may make calls back into this reactor. We make a copy of the
  // vector of timer queues since the original may be modified while the lock
  // is not held.
  void cleanup_operations_and_timers(
      boost::asio::detail::mutex::scoped_lock& lock)
  {
    timer_queues_for_cleanup_ = timer_queues_;
    lock.unlock();
    read_op_queue_.cleanup_operations();
    write_op_queue_.cleanup_operations();
    except_op_queue_.cleanup_operations();
    for (std::size_t i = 0; i < timer_queues_for_cleanup_.size(); ++i)
      timer_queues_for_cleanup_[i]->cleanup_timers();
  }

  // Mutex to protect access to internal data.
  boost::asio::detail::mutex mutex_;

  // The kqueue file descriptor.
  int kqueue_fd_;

  // Whether the kqueue wait call is currently in progress.
  bool wait_in_progress_;

  // The interrupter is used to break a blocking kevent call.
  select_interrupter interrupter_;

  // The queue of read operations.
  reactor_op_queue<socket_type> read_op_queue_;

  // The queue of write operations.
  reactor_op_queue<socket_type> write_op_queue_;

  // The queue of except operations.
  reactor_op_queue<socket_type> except_op_queue_;

  // The timer queues.
  std::vector<timer_queue_base*> timer_queues_;

  // A copy of the timer queues, used when cleaning up timers. The copy is
  // stored as a class data member to avoid unnecessary memory allocation.
  std::vector<timer_queue_base*> timer_queues_for_cleanup_;

  // The descriptors that are pending cancellation.
  std::vector<socket_type> pending_cancellations_;

  // Does the reactor loop thread need to stop.
  bool stop_thread_;

  // The thread that is running the reactor loop.
  boost::asio::detail::thread* thread_;

  // Whether the service has been shut down.
  bool shutdown_;

  // Whether we need to call kqueue the next time the reactor is run.
  bool need_kqueue_wait_;
};

} // namespace detail
} // namespace asio
} // namespace boost

#endif // defined(BOOST_ASIO_HAS_KQUEUE)

#include <boost/asio/detail/pop_options.hpp>

#endif // BOOST_ASIO_DETAIL_KQUEUE_REACTOR_HPP
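
The header above is an internal Boost.Asio component; applications normally reach it only indirectly through boost::asio::io_service. For readers unfamiliar with the underlying system calls, the following standalone sketch (not part of the header) shows the same register-then-wait kqueue pattern in isolation: one kevent() call with a changelist to add a descriptor, as in start_read_op(), and one kevent() call with an eventlist and a timeout to collect ready descriptors, as in run(). The pipe descriptors and the one-second timeout are illustrative choices only.

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <unistd.h>
#include <cstdio>

int main()
{
  // Create the kqueue, as do_kqueue_create() does above.
  int kq = kqueue();
  if (kq == -1)
    return 1;

  // A pipe stands in for the reactor's interrupter/socket descriptors
  // (illustrative only).
  int fds[2];
  if (pipe(fds) == -1)
    return 1;

  // Register interest in readability, mirroring start_read_op():
  // a changelist of one entry, no eventlist, no timeout.
  struct kevent change;
  EV_SET(&change, fds[0], EVFILT_READ, EV_ADD, 0, 0, 0);
  if (kevent(kq, &change, 1, 0, 0, 0) == -1)
    return 1;

  // Make the descriptor readable.
  write(fds[1], "x", 1);

  // Wait for events with a timeout, mirroring the kevent call in run():
  // no changelist, an eventlist to fill, and a timespec timeout.
  struct kevent events[128];
  timespec timeout = { 1, 0 }; // one second
  int num_events = kevent(kq, 0, 0, events, 128, &timeout);
  for (int i = 0; i < num_events; ++i)
    std::printf("descriptor %d is ready (%d bytes)\n",
        (int)events[i].ident, (int)events[i].data);

  close(fds[0]);
  close(fds[1]);
  close(kq);
  return 0;
}

The split between the changelist and eventlist arguments of kevent() is why the reactor passes null eventlist pointers when registering descriptors and a null changelist when waiting for events.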