1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
|
/* Copyright (C) 2019 MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA*/
#include "tpool_structs.h"
#include <algorithm>
#include <assert.h>
#include <condition_variable>
#include <iostream>
#include <limits.h>
#include <mutex>
#include <queue>
#include <stack>
#include <thread>
#include <vector>
#include <tpool.h>
namespace tpool
{
/*
Windows AIO implementation, completion port based.
A single thread collects the completion notification with
GetQueuedCompletionStatus(), and forwards io completion callback
the worker threadpool
*/
class tpool_generic_win_aio : public aio
{
/* Thread that does collects completion status from the completion port. */
std::thread m_thread;
/* IOCP Completion port.*/
HANDLE m_completion_port;
/* The worker pool where completion routine is executed, as task. */
thread_pool* m_pool;
public:
tpool_generic_win_aio(thread_pool* pool, int max_io) : m_pool(pool)
{
m_completion_port = CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, 0, 0);
m_thread = std::thread(aio_completion_thread_proc, this);
}
/**
Task to be executed in the work pool.
*/
static void io_completion_task(void* data)
{
auto cb = (aiocb*)data;
cb->execute_callback();
}
void completion_thread_work()
{
for (;;)
{
DWORD n_bytes;
aiocb* aiocb;
ULONG_PTR key;
if (!GetQueuedCompletionStatus(m_completion_port, &n_bytes, &key,
(LPOVERLAPPED*)& aiocb, INFINITE))
break;
aiocb->m_err = 0;
aiocb->m_ret_len = n_bytes;
if (n_bytes != aiocb->m_len)
{
if (GetOverlappedResult(aiocb->m_fh, aiocb,
(LPDWORD)& aiocb->m_ret_len, FALSE))
{
aiocb->m_err = GetLastError();
}
}
aiocb->m_internal_task.m_func = aiocb->m_callback;
aiocb->m_internal_task.m_arg = aiocb;
aiocb->m_internal_task.m_group = aiocb->m_group;
m_pool->submit_task(&aiocb->m_internal_task);
}
}
static void aio_completion_thread_proc(tpool_generic_win_aio* aio)
{
aio->completion_thread_work();
}
~tpool_generic_win_aio()
{
if (m_completion_port)
CloseHandle(m_completion_port);
m_thread.join();
}
virtual int submit_io(aiocb* cb) override
{
memset((OVERLAPPED *)cb, 0, sizeof(OVERLAPPED));
cb->m_internal = this;
ULARGE_INTEGER uli;
uli.QuadPart = cb->m_offset;
cb->Offset = uli.LowPart;
cb->OffsetHigh = uli.HighPart;
BOOL ok;
if (cb->m_opcode == aio_opcode::AIO_PREAD)
ok = ReadFile(cb->m_fh.m_handle, cb->m_buffer, cb->m_len, 0, cb);
else
ok = WriteFile(cb->m_fh.m_handle, cb->m_buffer, cb->m_len, 0, cb);
if (ok || (GetLastError() == ERROR_IO_PENDING))
return 0;
return -1;
}
// Inherited via aio
virtual int bind(native_file_handle& fd) override
{
return CreateIoCompletionPort(fd, m_completion_port, 0, 0) ? 0
: GetLastError();
}
virtual int unbind(const native_file_handle& fd) override { return 0; }
};
/**
  Factory for the Windows IOCP-based aio implementation.
  @param pool   worker pool that runs io completion callbacks
  @param max_io advisory limit on outstanding IOs
  @return heap-allocated aio instance; ownership passes to the caller
*/
aio* create_win_aio(thread_pool* pool, int max_io)
{
  auto* impl = new tpool_generic_win_aio(pool, max_io);
  return impl;
}
} // namespace tpool
|