summary | refs | log | tree | commit | diff
path: root/oslo_concurrency
diff options
context:
space:
mode:
author: Denis Buliga <dbuliga@cloudbasesolutions.com> 2016-08-08 15:53:57 +0300
committer: Denis Buliga <dbuliga@cloudbasesolutions.com> 2016-08-08 19:13:44 +0300
commit: 3c46e8f776f0e593a7c4515e3471d3b146ba0068 (patch)
tree: bc69c072705bd374e1b3191b5779816147b39e0a /oslo_concurrency
parent: 2e8d5481b1a3036e9d3e1f684b95cd123022b39f (diff)
download: oslo-concurrency-3c46e8f776f0e593a7c4515e3471d3b146ba0068.tar.gz
Fix external lock tests on Windows
At the moment, those tests use fcntl to acquire file locks, which will fail on Windows. This change addresses this issue by adding some platform checks, using the appropriate functions when handling file locks. Note that we've avoided running private methods from fasteners. Also, we now avoid using os.fork, which won't work on Windows. Instead, we spawn new processes using multiprocessing.Process, using queues for communication between processes. Change-Id: I9839b9033c814280f6d1b53c5ed2643fd2bf8bf8
Diffstat (limited to 'oslo_concurrency')
-rw-r--r--oslo_concurrency/tests/unit/test_lockutils.py97
1 file changed, 55 insertions(+), 42 deletions(-)
diff --git a/oslo_concurrency/tests/unit/test_lockutils.py b/oslo_concurrency/tests/unit/test_lockutils.py
index 75d324a..667028e 100644
--- a/oslo_concurrency/tests/unit/test_lockutils.py
+++ b/oslo_concurrency/tests/unit/test_lockutils.py
@@ -13,7 +13,7 @@
# under the License.
import collections
-import fcntl
+import multiprocessing
import os
import shutil
import signal
@@ -31,6 +31,50 @@ from oslo_concurrency.fixture import lockutils as fixtures
from oslo_concurrency import lockutils
from oslo_config import fixture as config
+if sys.platform == 'win32':
+ import msvcrt
+else:
+ import fcntl
+
+
+def lock_file(handle):
+ if sys.platform == 'win32':
+ msvcrt.locking(handle.fileno(), msvcrt.LK_NBLCK, 1)
+ else:
+ fcntl.flock(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
+
+
+def unlock_file(handle):
+ if sys.platform == 'win32':
+ msvcrt.locking(handle.fileno(), msvcrt.LK_UNLCK, 1)
+ else:
+ fcntl.flock(handle, fcntl.LOCK_UN)
+
+
+def lock_files(handles_dir, out_queue):
+ with lockutils.lock('external', 'test-', external=True):
+ # Open some files we can use for locking
+ handles = []
+ for n in range(50):
+ path = os.path.join(handles_dir, ('file-%s' % n))
+ handles.append(open(path, 'w'))
+
+ # Loop over all the handles and try locking the file
+ # without blocking, keep a count of how many files we
+ # were able to lock and then unlock. If the lock fails
+ # we get an IOError and bail out with bad exit code
+ count = 0
+ for handle in handles:
+ try:
+ lock_file(handle)
+ count += 1
+ unlock_file(handle)
+ except IOError:
+ os._exit(2)
+ finally:
+ handle.close()
+ return out_queue.put(count)
+
class LockTestCase(test_base.BaseTestCase):
@@ -126,51 +170,20 @@ class LockTestCase(test_base.BaseTestCase):
def _do_test_lock_externally(self):
"""We can lock across multiple processes."""
-
- def lock_files(handles_dir):
-
- with lockutils.lock('external', 'test-', external=True):
- # Open some files we can use for locking
- handles = []
- for n in range(50):
- path = os.path.join(handles_dir, ('file-%s' % n))
- handles.append(open(path, 'w'))
-
- # Loop over all the handles and try locking the file
- # without blocking, keep a count of how many files we
- # were able to lock and then unlock. If the lock fails
- # we get an IOError and bail out with bad exit code
- count = 0
- for handle in handles:
- try:
- fcntl.flock(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
- count += 1
- fcntl.flock(handle, fcntl.LOCK_UN)
- except IOError:
- os._exit(2)
- finally:
- handle.close()
-
- # Check if we were able to open all files
- self.assertEqual(50, count)
-
handles_dir = tempfile.mkdtemp()
try:
children = []
for n in range(50):
- pid = os.fork()
- if pid:
- children.append(pid)
- else:
- try:
- lock_files(handles_dir)
- finally:
- os._exit(0)
-
- for child in children:
- (pid, status) = os.waitpid(child, 0)
- if pid:
- self.assertEqual(0, status)
+ queue = multiprocessing.Queue()
+ proc = multiprocessing.Process(
+ target=lock_files,
+ args=(handles_dir, queue))
+ proc.start()
+ children.append((proc, queue))
+ for child, queue in children:
+ child.join()
+ count = queue.get(block=False)
+ self.assertEqual(50, count)
finally:
if os.path.exists(handles_dir):
shutil.rmtree(handles_dir, ignore_errors=True)