Diffstat (limited to 'ace')
-rw-r--r--   ace/OS.h            | 20
-rw-r--r--   ace/Strategies_T.h  | 43
-rw-r--r--   ace/Task.h          | 19
-rw-r--r--   ace/Thread.h        | 26
4 files changed, 104 insertions, 4 deletions
diff --git a/ace/OS.h b/ace/OS.h
--- a/ace/OS.h
+++ b/ace/OS.h
@@ -3066,6 +3066,26 @@ public:
                          long priority = -1,
                          void *stack = 0,
                          size_t stacksize = 0);
+  // Creates a new thread having <{flags}> attributes and running <{func}>
+  // with <{args}>.  <{thr_id}> and <{t_handle}> are set to the thread's ID
+  // and handle, respectively.  The thread runs at <{priority}> priority (see below).
+  //
+  // The <{flags}> are a bitwise-OR of the following:
+  // = BEGIN<INDENT>
+  // THR_CANCEL_DISABLE, THR_CANCEL_ENABLE, THR_CANCEL_DEFERRED, THR_CANCEL_ASYNCHRONOUS,
+  // THR_BOUND, THR_NEW_LWP, THR_DETACHED, THR_SUSPENDED, THR_DAEMON, THR_JOINABLE,
+  // THR_SCHED_FIFO, THR_SCHED_RR, THR_SCHED_DEFAULT
+  // = END<INDENT>
+  //
+  // By default, or if <{priority}> is set to -1, an "appropriate"
+  // priority value for the given scheduling policy (specified in
+  // <{flags}>, e.g., <THR_SCHED_DEFAULT>) is used.  This value is
+  // calculated dynamically, and is the median value between the
+  // minimum and maximum priority values for the given policy.  If an
+  // explicit value is given, it is used.  Note that actual priority
+  // values are EXTREMELY implementation-dependent, and are probably
+  // best avoided.
+
   static int thr_getprio (ACE_hthread_t thr_id, int &prio);
   static int thr_join (ACE_hthread_t waiter_id, void **status);
   static int thr_join (ACE_thread_t waiter_id, ACE_thread_t *thr_id, void **status);
diff --git a/ace/Strategies_T.h b/ace/Strategies_T.h
index 3d02e5bfee3..964f747f9a4 100644
--- a/ace/Strategies_T.h
+++ b/ace/Strategies_T.h
@@ -501,9 +501,15 @@ class ACE_Hash_Addr : public ADDR_T
   //     <ACE_Cached_Connect_Strategy>.
   //
   // = DESCRIPTION
-  //     Intended to be used as a key to an <ACE_Hash_Map>.  The
+  //     Intended to be used as a key to an <ACE_Hash_Map_Manager>.  The
   //     <SVC_HANDLER> class is expected to implement the following
-  //     methods: int in_use() const; void in_use(int is_used);
+  //     methods:
+  //     = BEGIN<INDENT>
+  //     = BEGIN<CODE>
+  //     int in_use() const;
+  //     void in_use(int is_used);
+  //     = END<CODE>
+  //     = END<INDENT>
   //     Likewise, the <ADDR_T> parameter/subclass is typically
   //     <ACE_INET_Addr>.
 {
@@ -545,8 +551,33 @@ private:
 template <class SVC_HANDLER, ACE_PEER_CONNECTOR_1, class MUTEX>
 class ACE_Cached_Connect_Strategy : public ACE_Connect_Strategy<SVC_HANDLER, ACE_PEER_CONNECTOR_2>
   // = TITLE
+  //     A connection strategy which caches connections to peers
+  //     (represented by <SVC_HANDLER> instances), thereby allowing
+  //     subsequent re-use of unused, but available, connections.
   //
   // = DESCRIPTION
+  //     <ACE_Cached_Connect_Strategy> is intended to be used as a
+  //     plug-in connection strategy for <ACE_Strategy_Connector>.
+  //     Its added value is the re-use of established connections.
+  //
+  // = USAGE
+  //     In order to use this appropriately, the user must provide
+  //     a template specialization for <ACE_Hash_Addr::compare_i()> and
+  //     <ACE_Hash_Addr::hash_i()> based on the address type and the
+  //     service handler type.  For example, a specialization using
+  //     <ACE_INET_Addr> and <My_Service_Handler> might be:
+  //     = BEGIN<NOFILL>
+  //     = BEGIN<CODE>
+  //     size_t
+  //     ACE_Hash_Addr<ACE_INET_Addr, My_Service_Handler>::hash_i(const ACE_INET_Addr &a)
+  //     {
+  //       return ...;
+  //     }
+  //     = END<CODE>
+  //     = END<NOFILL>
+  //
+  // = SEE ALSO
+  //     <ACE_Hash_Addr>.
 {
 public:
   virtual int connect_svc_handler (SVC_HANDLER *&sh,
@@ -560,6 +591,14 @@ public:
   // connected to the <remote_addr>.  If so, we return this pointer.
   // Otherwise we establish the connection, put it into the cache, and
   // return the <SVC_HANDLER> pointer.
+  // <[NOTE]>: the <{reuse_addr}> argument does NOT control re-use of
+  // addresses in the cache.  Rather, if the underlying protocol
+  // requires a "dead time" prior to re-use of its addresses (TCP
+  // is a classic example of this), <{and}> the protocol provides a means
+  // by which to defeat the dead time, setting this argument to non-zero
+  // will defeat the dead-time requirement.  <{Dev. Note: We might want
+  // to consider enhancing the interface at some point so that this also
+  // controls re-use of the cache.}>
 
 private:
   ACE_Hash_Map_Manager <ACE_Hash_Addr <ACE_PEER_CONNECTOR_ADDR,SVC_HANDLER>, SVC_HANDLER*, MUTEX> connection_cache_;
diff --git a/ace/Task.h b/ace/Task.h
index a1f7b555d35..39b5f430c5a 100644
--- a/ace/Task.h
+++ b/ace/Task.h
@@ -94,13 +94,30 @@ public:
                         int grp_id = -1,
                         ACE_Task_Base *task = 0);
   // Turn the task into an active object, i.e., having <n_threads> of
-  // control, all running at the <priority> level with the same
+  // control, all running at the <priority> level (see below) with the same
   // <grp_id>, all of which invoke <Task::svc>.  Returns -1 if failure
   // occurs, returns 1 if Task is already an active object and
   // <force_active> is false (doesn't *not* create a new thread in
   // this case), and returns 0 if Task was not already an active
   // object and a thread is created successfully or thread is an
   // active object and <force_active> is true.
+  //
+  // The <{flags}> are a bitwise-OR of the following:
+  // = BEGIN<INDENT>
+  // THR_CANCEL_DISABLE, THR_CANCEL_ENABLE, THR_CANCEL_DEFERRED,
+  // THR_CANCEL_ASYNCHRONOUS, THR_BOUND, THR_NEW_LWP, THR_DETACHED,
+  // THR_SUSPENDED, THR_DAEMON, THR_JOINABLE, THR_SCHED_FIFO,
+  // THR_SCHED_RR, THR_SCHED_DEFAULT
+  // = END<INDENT>
+  //
+  // By default, or if <{priority}> is set to -1, an "appropriate"
+  // priority value for the given scheduling policy (specified in
+  // <{flags}>, e.g., <THR_SCHED_DEFAULT>) is used.  This value is
+  // calculated dynamically, and is the median value between the
+  // minimum and maximum priority values for the given policy.  If an
+  // explicit value is given, it is used.  Note that actual priority
+  // values are EXTREMELY implementation-dependent, and are probably
+  // best avoided.
 
   // = Suspend/resume a Task
   virtual int suspend (void);
diff --git a/ace/Thread.h b/ace/Thread.h
index 17e3bf4ac47..49faf53aec7 100644
--- a/ace/Thread.h
+++ b/ace/Thread.h
@@ -37,7 +37,27 @@ public:
                     long priority = -1,
                     void *stack = 0,
                     size_t stack_size = 0);
-  // Spawn a new thread, which executes "func" with argument "arg".
+  // Spawn a new thread having <{flags}> attributes and running
+  // <{func}> with argument <{args}>.  <{thr_id}> and <{t_handle}>
+  // are set to the thread's ID and handle, respectively.  The
+  // thread runs at <{priority}> priority (see below).
+  //
+  // The <{flags}> are a bitwise-OR of the following:
+  // = BEGIN<INDENT>
+  // THR_CANCEL_DISABLE, THR_CANCEL_ENABLE, THR_CANCEL_DEFERRED,
+  // THR_CANCEL_ASYNCHRONOUS, THR_BOUND, THR_NEW_LWP, THR_DETACHED,
+  // THR_SUSPENDED, THR_DAEMON, THR_JOINABLE, THR_SCHED_FIFO,
+  // THR_SCHED_RR, THR_SCHED_DEFAULT
+  // = END<INDENT>
+  //
+  // By default, or if <{priority}> is set to -1, an "appropriate"
+  // priority value for the given scheduling policy (specified in
+  // <{flags}>, e.g., <THR_SCHED_DEFAULT>) is used.  This value is
+  // calculated dynamically, and is the median value between the
+  // minimum and maximum priority values for the given policy.  If an
+  // explicit value is given, it is used.  Note that actual priority
+  // values are EXTREMELY implementation-dependent, and are probably
+  // best avoided.
 
   static int spawn_n (size_t n,
                       ACE_THR_FUNC func,
@@ -54,6 +74,8 @@ public:
   // are.  Returns the number of threads actually spawned (if this
   // doesn't equal the number requested then something has gone wrong
   // and <errno> will explain...).
+  //
+  // See also <spawn>.
 
   static int spawn_n (ACE_thread_t thread_ids[],
                       size_t n,
@@ -76,6 +98,8 @@ public:
   // handles being spawned.  Returns the number of threads actually
   // spawned (if this doesn't equal the number requested then
   // something has gone wrong and <errno> will explain...).
+  //
+  // See also <spawn>.
 
   static int join (const ACE_Thread_ID &,
                    void **status = 0);
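The flags and default-priority semantics documented above apply uniformly to ACE_OS::thr_create, ACE_Task::activate, and ACE_Thread::spawn.  The following sketch is an illustration only, not part of this change set; the worker function and the particular flag combination are assumptions.  It spawns one joinable kernel-level thread and leaves the priority argument at its default of -1, so the median priority for the default scheduling policy is computed at run time, as described in the comments.

    #include "ace/OS.h"
    #include "ace/Thread.h"

    // Assumed worker function; any ACE_THR_FUNC-compatible function will do.
    static void *worker (void *)
    {
      // ... per-thread work ...
      return 0;
    }

    int main (int, char *[])
    {
      ACE_thread_t thr_id;
      ACE_hthread_t t_handle;

      // <{flags}> is a bitwise-OR of the values listed above; omitting the
      // <{priority}> argument leaves it at -1, selecting the "appropriate"
      // (median) priority for the default scheduling policy.
      if (ACE_Thread::spawn (ACE_THR_FUNC (worker),
                             0,                           // args
                             THR_NEW_LWP | THR_JOINABLE,
                             &thr_id,
                             &t_handle) == -1)
        return 1;

      void *status = 0;
      ACE_OS::thr_join (t_handle, &status);  // reap the joinable thread
      return 0;
    }

An ACE_Task subclass would pass the same flags (and the same default priority of -1) to activate(), with each spawned thread entering its svc() hook instead of a free function.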