author     Ben Noordhuis <info@bnoordhuis.nl>  2012-06-05 16:45:29 +0200
committer  Ben Noordhuis <info@bnoordhuis.nl>  2012-06-05 16:48:17 +0200
commit     27061cc9f45afbc4ddc1efa8bed1ea22df7cb0f4 (patch)
tree       b349e23235822064a356a1e4684889492aa12e34
parent     cc0e7efb3765ffd22d1394618e3f648cde7dfddc (diff)
udp_wrap, stream_wrap: lazy init slab allocator
Create the slab allocator when the binding is initialized. Add an AtExit handler to destroy the slab before the VM shuts down; it can't be disposed once V8 is dead, and Valgrind will otherwise complain about memory leaks.
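The teardown ordering the message relies on can be modeled outside of node. Below is a minimal, self-contained sketch: std::atexit stands in for node's AtExit(cb, arg), and the plain VM/Slab structs stand in for V8 and SlabAllocator (all names here are illustrative, not node's).

#include <cstdio>
#include <cstdlib>

struct VM {             // stand-in for the V8 runtime
  ~VM() { std::puts("VM destroyed"); }
};
static VM vm;           // constructed during static initialization

struct Slab {           // stand-in for SlabAllocator
  ~Slab() { std::puts("slab destroyed (while the VM is still alive)"); }
};
static Slab* slab;      // lazily created, never statically constructed

static void DeleteSlab() {
  delete slab;
  slab = NULL;
}

int main() {
  // "Binding initialization": create the slab on first use and register
  // the teardown hook. Handlers registered after `vm` finished construction
  // run before ~VM(), so the slab is destroyed first -- the order needed
  // for the allocator to still touch the VM during its own destruction.
  slab = new Slab();
  std::atexit(DeleteSlab);
  return 0;
}

Had `slab` kept static storage duration instead, its destructor could run after the VM's, which is exactly the failure mode the commit avoids.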
-rw-r--r--  src/stream_wrap.cc | 21
-rw-r--r--  src/udp_wrap.cc    | 19
2 files changed, 29 insertions(+), 11 deletions(-)
diff --git a/src/stream_wrap.cc b/src/stream_wrap.cc
index aaa5cc609..e79d212c4 100644
--- a/src/stream_wrap.cc
+++ b/src/stream_wrap.cc
@@ -76,14 +76,23 @@ static Persistent<String> bytes_sym;
 static Persistent<String> write_queue_size_sym;
 static Persistent<String> onread_sym;
 static Persistent<String> oncomplete_sym;
-static SlabAllocator slab_allocator(SLAB_SIZE);
+static SlabAllocator* slab_allocator;
 static bool initialized;


+static void DeleteSlabAllocator(void*) {
+  delete slab_allocator;
+  slab_allocator = NULL;
+}
+
+
 void StreamWrap::Initialize(Handle<Object> target) {
   if (initialized) return;
   initialized = true;

+  slab_allocator = new SlabAllocator(SLAB_SIZE);
+  AtExit(DeleteSlabAllocator, NULL);
+
   HandleScope scope;

   HandleWrap::Initialize(target);
@@ -156,7 +165,7 @@ Handle<Value> StreamWrap::ReadStop(const Arguments& args) {
 uv_buf_t StreamWrap::OnAlloc(uv_handle_t* handle, size_t suggested_size) {
   StreamWrap* wrap = static_cast<StreamWrap*>(handle->data);
   assert(wrap->stream_ == reinterpret_cast<uv_stream_t*>(handle));
-  char* buf = slab_allocator.Allocate(wrap->object_, suggested_size);
+  char* buf = slab_allocator->Allocate(wrap->object_, suggested_size);
   return uv_buf_init(buf, suggested_size);
 }
@@ -175,7 +184,7 @@ void StreamWrap::OnReadCommon(uv_stream_t* handle, ssize_t nread,
     // If libuv reports an error or EOF it *may* give us a buffer back. In that
     // case, return the space to the slab.
     if (buf.base != NULL) {
-      slab_allocator.Shrink(wrap->object_, buf.base, 0);
+      slab_allocator->Shrink(wrap->object_, buf.base, 0);
     }

     SetErrno(uv_last_error(uv_default_loop()));
@@ -184,9 +193,9 @@ void StreamWrap::OnReadCommon(uv_stream_t* handle, ssize_t nread,
   }

   assert(buf.base != NULL);
-  Local<Object> slab = slab_allocator.Shrink(wrap->object_,
-                                             buf.base,
-                                             nread);
+  Local<Object> slab = slab_allocator->Shrink(wrap->object_,
+                                              buf.base,
+                                              nread);

   if (nread == 0) return;
   assert(static_cast<size_t>(nread) <= buf.len);
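For readers unfamiliar with the slab scheme the hunks above rely on: OnAlloc reserves the full suggested_size up front, and Shrink later hands back the unused tail once libuv reports how many bytes actually arrived. The toy model below is an assumption-laden sketch of that Allocate/Shrink contract; the real src/slab_allocator.h parks the memory in a V8 Buffer keyed on the owning wrap object, which is replaced here with a bump pointer over a plain heap block.

#include <cassert>
#include <cstddef>
#include <cstdio>

// Toy stand-in for node's SlabAllocator (illustrative only).
class ToySlabAllocator {
 public:
  explicit ToySlabAllocator(size_t size)
      : buf_(new char[size]), size_(size), offset_(0) {}
  ~ToySlabAllocator() { delete[] buf_; }

  // Reserve `size` bytes at the current offset (callers over-reserve).
  char* Allocate(size_t size) {
    assert(offset_ + size <= size_);
    char* ptr = buf_ + offset_;
    offset_ += size;
    return ptr;
  }

  // Keep only `used` bytes of the reservation starting at `ptr`; the
  // rest becomes available to the next Allocate call.
  void Shrink(char* ptr, size_t used) {
    offset_ = static_cast<size_t>(ptr - buf_) + used;
  }

 private:
  char* buf_;
  size_t size_;
  size_t offset_;
};

int main() {
  ToySlabAllocator slab(64 * 1024);
  char* buf = slab.Allocate(4096);  // OnAlloc: hand libuv suggested_size
  size_t nread = 17;                // the read delivered far fewer bytes
  slab.Shrink(buf, nread);          // OnRead: return the unused 4079 bytes
  std::printf("slab kept %u bytes\n", static_cast<unsigned>(nread));
  return 0;
}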
diff --git a/src/udp_wrap.cc b/src/udp_wrap.cc
index b0f8bad1f..c5f0aa76d 100644
--- a/src/udp_wrap.cc
+++ b/src/udp_wrap.cc
@@ -58,7 +58,13 @@ Local<Object> AddressToJS(const sockaddr* addr);
 static Persistent<String> buffer_sym;
 static Persistent<String> oncomplete_sym;
 static Persistent<String> onmessage_sym;
-static SlabAllocator slab_allocator(SLAB_SIZE);
+static SlabAllocator* slab_allocator;
+
+
+static void DeleteSlabAllocator(void*) {
+  delete slab_allocator;
+  slab_allocator = NULL;
+}


 UDPWrap::UDPWrap(Handle<Object> object): HandleWrap(object,
@@ -76,6 +82,9 @@ UDPWrap::~UDPWrap() {
 void UDPWrap::Initialize(Handle<Object> target) {
   HandleWrap::Initialize(target);

+  slab_allocator = new SlabAllocator(SLAB_SIZE);
+  AtExit(DeleteSlabAllocator, NULL);
+
   HandleScope scope;

   buffer_sym = NODE_PSYMBOL("buffer");
@@ -352,7 +361,7 @@ void UDPWrap::OnSend(uv_udp_send_t* req, int status) {

 uv_buf_t UDPWrap::OnAlloc(uv_handle_t* handle, size_t suggested_size) {
   UDPWrap* wrap = static_cast<UDPWrap*>(handle->data);
-  char* buf = slab_allocator.Allocate(wrap->object_, suggested_size);
+  char* buf = slab_allocator->Allocate(wrap->object_, suggested_size);
   return uv_buf_init(buf, suggested_size);
 }
@@ -365,9 +374,9 @@ void UDPWrap::OnRecv(uv_udp_t* handle,
   HandleScope scope;

   UDPWrap* wrap = reinterpret_cast<UDPWrap*>(handle->data);
-  Local<Object> slab = slab_allocator.Shrink(wrap->object_,
-                                             buf.base,
-                                             nread < 0 ? 0 : nread);
+  Local<Object> slab = slab_allocator->Shrink(wrap->object_,
+                                              buf.base,
+                                              nread < 0 ? 0 : nread);
   if (nread == 0) return;

   if (nread < 0) {
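Note that udp_wrap folds its error path into the Shrink call itself: `nread < 0 ? 0 : nread` keeps zero bytes on error, returning the whole reservation to the slab, whereas stream_wrap's OnReadCommon handles the same case with an explicit `if (buf.base != NULL)` branch.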