author     Lorry Tar Creator <lorry-tar-importer@baserock.org>  2010-03-02 09:35:54 +0000
committer  <>                                                   2014-12-08 18:37:23 +0000
commit     02378192d5bb4b16498d87ace57da425166426bf (patch)
tree       2e940dd7284d31c7d32808d9c6635a57547363cb
download   python-daemon-02378192d5bb4b16498d87ace57da425166426bf.tar.gz
Imported from /home/lorry/working-area/delta_python-packages_python-daemon/python-daemon-1.5.5.tar.gz.python-daemon-1.5.5
-rw-r--r--  ChangeLog                                     187
-rw-r--r--  LICENSE.GPL-2                                 339
-rw-r--r--  LICENSE.PSF-2                                  48
-rw-r--r--  MANIFEST.in                                     4
-rw-r--r--  PKG-INFO                                       37
-rw-r--r--  daemon/__init__.py                             47
-rw-r--r--  daemon/daemon.py                              776
-rw-r--r--  daemon/pidlockfile.py                         194
-rw-r--r--  daemon/runner.py                              229
-rw-r--r--  daemon/version/__init__.py                     36
-rw-r--r--  daemon/version/version_info.py                 23
-rw-r--r--  python_daemon.egg-info/PKG-INFO                37
-rw-r--r--  python_daemon.egg-info/SOURCES.txt             22
-rw-r--r--  python_daemon.egg-info/dependency_links.txt     1
-rw-r--r--  python_daemon.egg-info/not-zip-safe             1
-rw-r--r--  python_daemon.egg-info/requires.txt             2
-rw-r--r--  python_daemon.egg-info/top_level.txt            1
-rw-r--r--  setup.cfg                                       5
-rw-r--r--  setup.py                                       64
-rw-r--r--  test/__init__.py                               19
-rw-r--r--  test/scaffold.py                              402
-rw-r--r--  test/test_daemon.py                          1937
-rw-r--r--  test/test_pidlockfile.py                      791
-rw-r--r--  test/test_runner.py                           662
24 files changed, 5864 insertions, 0 deletions
diff --git a/ChangeLog b/ChangeLog
new file mode 100644
index 0000000..d96fad7
--- /dev/null
+++ b/ChangeLog
@@ -0,0 +1,187 @@
+2010-03-02 Ben Finney <ben+python@benfinney.id.au>
+
+ Version 1.5.5 released.
+
+ * Stop using ‘pkg_resources’ and revert to pre-1.5.3 version-string
+ handling, until a better way that doesn't break everyone else's
+ installation can be found.
+
+2010-02-27 Ben Finney <ben+python@benfinney.id.au>
+
+ Version 1.5.4 released.
+
+ * MANIFEST.in: Explicitly include version data file, otherwise
+ everything breaks for users of the sdist.
+
+2010-02-26 Ben Finney <ben+python@benfinney.id.au>
+
+ Version 1.5.3 released.
+
+ * daemon/daemon.py: Invoke the pidfile context manager's ‘__exit__’
+ method with the correct arguments (as per
+ <URL:http://docs.python.org/library/stdtypes.html#typecontextmanager>).
+ Thanks to Ludvig Ericson for the bug report.
+ * version: New plain-text data file to store project version string.
+ * setup.py: Read version string from data file.
+ * daemon/version/__init__.py: Query version string with ‘pkg_resources’.
+
+2010-01-20 Ben Finney <ben+python@benfinney.id.au>
+
+ * Add ‘pylint’ configuration for this project.
+ * Update copyright notices.
+
+2009-10-24 Ben Finney <ben+python@benfinney.id.au>
+
+ Version 1.5.2 released.
+
+2009-10-19 Ben Finney <ben+python@benfinney.id.au>
+
+ * Ensure we only prevent core dumps if ‘prevent_core’ is true.
+ Thanks to Denis Bilenko for reporting the lacking implementation of
+ this documented option.
+
+2009-09-28 Ben Finney <ben+python@benfinney.id.au>
+
+ * Add initial Frequently Asked Questions document.
+
+2009-09-26 Ben Finney <ben+python@benfinney.id.au>
+
+ Version 1.5.1 released.
+
+ * Make a separate collection of DaemonRunner test scenarios.
+ * Handle a start request with a timeout on the PID file lock acquire.
+
+2009-09-24 Ben Finney <ben+python@benfinney.id.au>
+
+ * Implement ‘TimeoutPIDLockFile’ to specify a timeout in advance of
+ lock acquisition.
+ * Use lock with timeout for ‘DaemonRunner’.
+
+2009-09-24 Ben Finney <ben+python@benfinney.id.au>
+
+ Version 1.5 released.
+
+ * Make a separate collection of PIDLockFile test scenarios.
+
+2009-09-23 Ben Finney <ben+python@benfinney.id.au>
+
+ * Raise specific errors on ‘DaemonRunner’ failures.
+ * Distinguish different conditions on reading and parsing PID file.
+ * Refactor code to ‘_terminate_daemon_process’ method.
+ * Improve explanations in comments and docstrings.
+ * Don't set pidfile at all if no path specified to constructor.
+ * Write the PID file using correct OS locking and permissions.
+ * Close the PID file after writing.
+ * Implement ‘PIDLockFile’ as subclass of ‘lockfile.LinkFileLock’.
+ * Remove redundant checks for file existence.
+
+2009-09-18 Ben Finney <ben+python@benfinney.id.au>
+
+ * Manage the excluded file descriptors as a set (not a list).
+ * Only inspect the file descriptor of streams if they actually have
+ one (via a ‘fileno’ method) when determining which file descriptors
+ to close. Thanks to Ask Solem for revealing this bug.
+
+2009-09-17 Ben Finney <ben+python@benfinney.id.au>
+
+ Version 1.4.8 released.
+
+ * Remove child-exit signal (‘SIGCLD’, ‘SIGCHLD’) from default signal
+ map. Thanks to Joel Martin for pinpointing this issue.
+ * Document requirement for ensuring any operating-system specific
+ signal handlers are considered.
+ * Refactor ‘fork_then_exit_parent’ functionality to avoid duplicate
+ code.
+ * Remove redundant imports.
+ * Remove unused code from unit test suite scaffold.
+ * Add specific license terms for unit test suite scaffold.
+
+2009-09-03 Ben Finney <ben+python@benfinney.id.au>
+
+ Version 1.4.7 released.
+
+2009-09-02 Ben Finney <ben+python@benfinney.id.au>
+
+ * Fix keywords argument for distribution setup.
+ * Exclude ‘test’ package from distribution installation.
+
+2009-06-21 Ben Finney <ben+python@benfinney.id.au>
+
+ Version 1.4.6 released.
+
+ * Update documentation for changes from latest PEP 3143 revision.
+ * Implement DaemonContext.is_open method.
+
+2009-05-17 Ben Finney <ben+python@benfinney.id.au>
+
+ Version 1.4.5 released.
+
+ * Register DaemonContext.close method for atexit processing.
+ * Move PID file cleanup to close method.
+ * Improve docstrings by reference to, and copy from, PEP 3143.
+ * Use mock checking capabilities of newer ‘MiniMock’ library.
+ * Automate building a versioned distribution tarball.
+ * Include developer documentation files in source distribution.
+
+2009-03-26 Ben Finney <ben+python@benfinney.id.au>
+
+ Version 1.4.4 released.
+
+ * Conform to current PEP version, now released as PEP 3143 “Standard
+ daemon process library”.
+ * Ensure UID and GID are set in correct order.
+ * Delay closing all open files until just before re-binding standard
+ streams.
+ * Redirect standard streams to null device by default.
+
+2009-03-19 Ben Finney <ben+python@benfinney.id.au>
+
+ Version 1.4.3 released.
+
+ * Close the PID file context on exit.
+
+2009-03-18 Ben Finney <ben+python@benfinney.id.au>
+
+ Version 1.4.2 released.
+
+ * Context manager methods for DaemonContext.
+
+2009-03-18 Ben Finney <ben+python@benfinney.id.au>
+
+ Version 1.4.1 released.
+
+ * Improvements to docstrings.
+ * Further conformance with draft PEP.
+
+2009-03-17 Ben Finney <ben+python@benfinney.id.au>
+
+ Version 1.4 released.
+
+ * Implement the interface from a draft PEP for process daemonisation.
+ * Complete statement coverage from unit test suite.
+
+2009-03-12 Ben Finney <ben+python@benfinney.id.au>
+
+ Version 1.3 released.
+
+ * Separate controller (now ‘DaemonRunner’) from daemon process
+ context (now ‘DaemonContext’).
+ * Fix many corner cases and bugs.
+ * Huge increase in unit test suite.
+
+2009-01-27 Ben Finney <ben+python@benfinney.id.au>
+
+ Version 1.2 released.
+
+ * Initial release of this project forked from ‘bda.daemon’. Thanks,
+ Robert Niederreiter.
+ * Refactor some functionality out to helper functions.
+ * Begin unit test suite.
+
+
+Local variables:
+mode: change-log
+coding: utf-8
+left-margin: 4
+indent-tabs-mode: nil
+End:
diff --git a/LICENSE.GPL-2 b/LICENSE.GPL-2
new file mode 100644
index 0000000..d511905
--- /dev/null
+++ b/LICENSE.GPL-2
@@ -0,0 +1,339 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/LICENSE.PSF-2 b/LICENSE.PSF-2
new file mode 100644
index 0000000..28533b6
--- /dev/null
+++ b/LICENSE.PSF-2
@@ -0,0 +1,48 @@
+PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+--------------------------------------------
+
+1. This LICENSE AGREEMENT is between the Python Software Foundation
+("PSF"), and the Individual or Organization ("Licensee") accessing and
+otherwise using this software ("Python") in source or binary form and
+its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, PSF
+hereby grants Licensee a nonexclusive, royalty-free, world-wide
+license to reproduce, analyze, test, perform and/or display publicly,
+prepare derivative works, distribute, and otherwise use Python
+alone or in any derivative version, provided, however, that PSF's
+License Agreement and PSF's notice of copyright, i.e., "Copyright (c)
+2001, 2002, 2003, 2004, 2005, 2006, 2007 Python Software Foundation;
+All Rights Reserved" are retained in Python alone or in any derivative
+version prepared by Licensee.
+
+3. In the event Licensee prepares a derivative work that is based on
+or incorporates Python or any part thereof, and wants to make
+the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to Python.
+
+4. PSF is making Python available to Licensee on an "AS IS"
+basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. Nothing in this License Agreement shall be deemed to create any
+relationship of agency, partnership, or joint venture between PSF and
+Licensee. This License Agreement does not grant permission to use PSF
+trademarks or trade name in a trademark sense to endorse or promote
+products or services of Licensee, or any third party.
+
+8. By copying, installing or otherwise using Python, Licensee
+agrees to be bound by the terms and conditions of this License
+Agreement.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..ef71641
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,4 @@
+include MANIFEST.in
+include LICENSE.*
+include ChangeLog
+include TODO
diff --git a/PKG-INFO b/PKG-INFO
new file mode 100644
index 0000000..df8f553
--- /dev/null
+++ b/PKG-INFO
@@ -0,0 +1,37 @@
+Metadata-Version: 1.0
+Name: python-daemon
+Version: 1.5.5
+Summary: Library to implement a well-behaved Unix daemon process.
+Home-page: http://pypi.python.org/pypi/python-daemon/
+Author: Ben Finney
+Author-email: ben+python@benfinney.id.au
+License: PSF-2+
+Description: This library implements the well-behaved daemon specification of
+ :pep:`3143`, "Standard daemon process library".
+
+ A well-behaved Unix daemon process is tricky to get right, but the
+ required steps are much the same for every daemon program. A
+ `DaemonContext` instance holds the behaviour and configured
+ process environment for the program; use the instance as a context
+ manager to enter a daemon state.
+
+ Simple example of usage::
+
+ import daemon
+
+ from spam import do_main_program
+
+ with daemon.DaemonContext():
+ do_main_program()
+
+ Customisation of the steps to become a daemon is available by
+ setting options on the `DaemonContext` instance; see the
+ documentation for that class for each option.
+Keywords: daemon,fork,unix
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: License :: OSI Approved :: Python Software Foundation License
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: Python
+Classifier: Intended Audience :: Developers
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
diff --git a/daemon/__init__.py b/daemon/__init__.py
new file mode 100644
index 0000000..d8dc171
--- /dev/null
+++ b/daemon/__init__.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+
+# daemon/__init__.py
+# Part of python-daemon, an implementation of PEP 3143.
+#
+# Copyright © 2009–2010 Ben Finney <ben+python@benfinney.id.au>
+# Copyright © 2006 Robert Niederreiter
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Python Software Foundation License, version 2 or
+# later as published by the Python Software Foundation.
+# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
+
+""" Library to implement a well-behaved Unix daemon process.
+
+ This library implements the well-behaved daemon specification of
+ :pep:`3143`, "Standard daemon process library".
+
+ A well-behaved Unix daemon process is tricky to get right, but the
+ required steps are much the same for every daemon program. A
+ `DaemonContext` instance holds the behaviour and configured
+ process environment for the program; use the instance as a context
+ manager to enter a daemon state.
+
+ Simple example of usage::
+
+ import daemon
+
+ from spam import do_main_program
+
+ with daemon.DaemonContext():
+ do_main_program()
+
+ Customisation of the steps to become a daemon is available by
+ setting options on the `DaemonContext` instance; see the
+ documentation for that class for each option.
+
+ """
+
+import version
+from daemon import DaemonContext
+
+
+_version = version.version
+_copyright = version.copyright
+_license = version.license
+_url = "http://pypi.python.org/pypi/python-daemon/"
diff --git a/daemon/daemon.py b/daemon/daemon.py
new file mode 100644
index 0000000..28db695
--- /dev/null
+++ b/daemon/daemon.py
@@ -0,0 +1,776 @@
+# -*- coding: utf-8 -*-
+
+# daemon/daemon.py
+# Part of python-daemon, an implementation of PEP 3143.
+#
+# Copyright © 2008–2010 Ben Finney <ben+python@benfinney.id.au>
+# Copyright © 2007–2008 Robert Niederreiter, Jens Klein
+# Copyright © 2004–2005 Chad J. Schroeder
+# Copyright © 2003 Clark Evans
+# Copyright © 2002 Noah Spurrier
+# Copyright © 2001 Jürgen Hermann
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Python Software Foundation License, version 2 or
+# later as published by the Python Software Foundation.
+# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
+
+""" Daemon process behaviour.
+ """
+
+import os
+import sys
+import resource
+import errno
+import signal
+import socket
+import atexit
+
+
+class DaemonError(Exception):
+ """ Base exception class for errors from this module. """
+
+
+class DaemonOSEnvironmentError(DaemonError, OSError):
+ """ Exception raised when daemon OS environment setup receives error. """
+
+
+class DaemonProcessDetachError(DaemonError, OSError):
+ """ Exception raised when process detach fails. """
+
+
+class DaemonContext(object):
+ """ Context for turning the current program into a daemon process.
+
+ A `DaemonContext` instance represents the behaviour settings and
+ process context for the program when it becomes a daemon. The
+ behaviour and environment is customised by setting options on the
+ instance, before calling the `open` method.
+
+ Each option can be passed as a keyword argument to the `DaemonContext`
+ constructor, or subsequently altered by assigning to an attribute on
+ the instance at any time prior to calling `open`. That is, for
+ options named `wibble` and `wubble`, the following invocation::
+
+ foo = daemon.DaemonContext(wibble=bar, wubble=baz)
+ foo.open()
+
+ is equivalent to::
+
+ foo = daemon.DaemonContext()
+ foo.wibble = bar
+ foo.wubble = baz
+ foo.open()
+
+ The following options are defined.
+
+ `files_preserve`
+ :Default: ``None``
+
+ List of files that should *not* be closed when starting the
+ daemon. If ``None``, all open file descriptors will be closed.
+
+ Elements of the list are file descriptors (as returned by a file
+ object's `fileno()` method) or Python `file` objects. Each
+ specifies a file that is not to be closed during daemon start.
+
+ `chroot_directory`
+ :Default: ``None``
+
+ Full path to a directory to set as the effective root directory of
+ the process. If ``None``, specifies that the root directory is not
+ to be changed.
+
+ `working_directory`
+ :Default: ``'/'``
+
+ Full path of the working directory to which the process should
+ change on daemon start.
+
+ Since a filesystem cannot be unmounted if a process has its
+ current working directory on that filesystem, this should either
+ be left at default or set to a directory that is a sensible “home
+ directory” for the daemon while it is running.
+
+ `umask`
+ :Default: ``0``
+
+ File access creation mask (“umask”) to set for the process on
+ daemon start.
+
+ Since a process inherits its umask from its parent process,
+ starting the daemon will reset the umask to this value so that
+ files are created by the daemon with access modes as it expects.
+
+ `pidfile`
+ :Default: ``None``
+
+ Context manager for a PID lock file. When the daemon context opens
+ and closes, it enters and exits the `pidfile` context manager.
+
+ `detach_process`
+ :Default: ``None``
+
+ If ``True``, detach the process context when opening the daemon
+ context; if ``False``, do not detach.
+
+ If unspecified (``None``) during initialisation of the instance,
+ this will be set to ``True`` by default, and ``False`` only if
+ detaching the process is determined to be redundant; for example,
+ in the case when the process was started by `init`, by `initd`, or
+ by `inetd`.
+
+ `signal_map`
+ :Default: system-dependent
+
+ Mapping from operating system signals to callback actions.
+
+ The mapping is used when the daemon context opens, and determines
+ the action for each signal's signal handler:
+
+ * A value of ``None`` will ignore the signal (by setting the
+ signal action to ``signal.SIG_IGN``).
+
+ * A string value will be used as the name of an attribute on the
+ ``DaemonContext`` instance. The attribute's value will be used
+ as the action for the signal handler.
+
+ * Any other value will be used as the action for the
+ signal handler. See the ``signal.signal`` documentation
+ for details of the signal handler interface.
+
+ The default value depends on which signals are defined on the
+ running system. Each item from the list below whose signal is
+ actually defined in the ``signal`` module will appear in the
+ default map:
+
+ * ``signal.SIGTTIN``: ``None``
+
+ * ``signal.SIGTTOU``: ``None``
+
+ * ``signal.SIGTSTP``: ``None``
+
+ * ``signal.SIGTERM``: ``'terminate'``
+
+ Depending on how the program will interact with its child
+ processes, it may need to specify a signal map that
+ includes the ``signal.SIGCHLD`` signal (received when a
+ child process exits). See the specific operating system's
+ documentation for more detail on how to determine what
+ circumstances dictate the need for signal handlers.
+
+ `uid`
+ :Default: ``os.getuid()``
+
+ `gid`
+ :Default: ``os.getgid()``
+
+ The user ID (“UID”) value and group ID (“GID”) value to switch
+ the process to on daemon start.
+
+ The default values, the real UID and GID of the process, will
+ relinquish any effective privilege elevation inherited by the
+ process.
+
+ `prevent_core`
+ :Default: ``True``
+
+ If true, prevents the generation of core files, in order to avoid
+ leaking sensitive information from daemons run as `root`.
+
+ `stdin`
+ :Default: ``None``
+
+ `stdout`
+ :Default: ``None``
+
+ `stderr`
+ :Default: ``None``
+
+ Each of `stdin`, `stdout`, and `stderr` is a file-like object
+ which will be used as the new file for the standard I/O stream
+ `sys.stdin`, `sys.stdout`, and `sys.stderr` respectively. The file
+ should therefore be open, with a minimum of mode 'r' in the case
+ of `stdin`, and mode 'w+' in the case of `stdout` and `stderr`.
+
+ If the object has a `fileno()` method that returns a file
+ descriptor, the corresponding file will be excluded from being
+ closed during daemon start (that is, it will be treated as though
+ it were listed in `files_preserve`).
+
+ If ``None``, the corresponding system stream is re-bound to the
+ file named by `os.devnull`.
+
+ """
+
+ def __init__(
+ self,
+ chroot_directory=None,
+ working_directory='/',
+ umask=0,
+ uid=None,
+ gid=None,
+ prevent_core=True,
+ detach_process=None,
+ files_preserve=None,
+ pidfile=None,
+ stdin=None,
+ stdout=None,
+ stderr=None,
+ signal_map=None,
+ ):
+ """ Set up a new instance. """
+ self.chroot_directory = chroot_directory
+ self.working_directory = working_directory
+ self.umask = umask
+ self.prevent_core = prevent_core
+ self.files_preserve = files_preserve
+ self.pidfile = pidfile
+ self.stdin = stdin
+ self.stdout = stdout
+ self.stderr = stderr
+
+ if uid is None:
+ uid = os.getuid()
+ self.uid = uid
+ if gid is None:
+ gid = os.getgid()
+ self.gid = gid
+
+ if detach_process is None:
+ detach_process = is_detach_process_context_required()
+ self.detach_process = detach_process
+
+ if signal_map is None:
+ signal_map = make_default_signal_map()
+ self.signal_map = signal_map
+
+ self._is_open = False
+
+ @property
+ def is_open(self):
+ """ ``True`` if the instance is currently open. """
+ return self._is_open
+
+ def open(self):
+ """ Become a daemon process.
+ :Return: ``None``
+
+ Open the daemon context, turning the current program into a daemon
+ process. This performs the following steps:
+
+ * If this instance's `is_open` property is true, return
+ immediately. This makes it safe to call `open` multiple times on
+ an instance.
+
+ * If the `prevent_core` attribute is true, set the resource limits
+ for the process to prevent any core dump from the process.
+
+ * If the `chroot_directory` attribute is not ``None``, set the
+ effective root directory of the process to that directory (via
+ `os.chroot`).
+
+ This allows running the daemon process inside a “chroot gaol”
+ as a means of limiting the system's exposure to rogue behaviour
+ by the process. Note that the specified directory needs to
+ already be set up for this purpose.
+
+ * Set the process UID and GID to the `uid` and `gid` attribute
+ values.
+
+ * Close all open file descriptors. This excludes those listed in
+ the `files_preserve` attribute, and those that correspond to the
+ `stdin`, `stdout`, or `stderr` attributes.
+
+ * Change current working directory to the path specified by the
+ `working_directory` attribute.
+
+ * Reset the file access creation mask to the value specified by
+ the `umask` attribute.
+
+ * If the `detach_process` option is true, detach the current
+ process into its own process group, and disassociate from any
+ controlling terminal.
+
+ * Set signal handlers as specified by the `signal_map` attribute.
+
+ * If any of the attributes `stdin`, `stdout`, `stderr` are not
+ ``None``, bind the system streams `sys.stdin`, `sys.stdout`,
+ and/or `sys.stderr` to the files represented by the
+ corresponding attributes. Where the attribute has a file
+ descriptor, the descriptor is duplicated (instead of re-binding
+ the name).
+
+ * If the `pidfile` attribute is not ``None``, enter its context
+ manager.
+
+ * Mark this instance as open (for the purpose of future `open` and
+ `close` calls).
+
+ * Register the `close` method to be called during Python's exit
+ processing.
+
+ When the function returns, the running program is a daemon
+ process.
+
+ """
+ if self.is_open:
+ return
+
+ if self.chroot_directory is not None:
+ change_root_directory(self.chroot_directory)
+
+ if self.prevent_core:
+ prevent_core_dump()
+
+ change_file_creation_mask(self.umask)
+ change_working_directory(self.working_directory)
+ change_process_owner(self.uid, self.gid)
+
+ if self.detach_process:
+ detach_process_context()
+
+ signal_handler_map = self._make_signal_handler_map()
+ set_signal_handlers(signal_handler_map)
+
+ exclude_fds = self._get_exclude_file_descriptors()
+ close_all_open_files(exclude=exclude_fds)
+
+ redirect_stream(sys.stdin, self.stdin)
+ redirect_stream(sys.stdout, self.stdout)
+ redirect_stream(sys.stderr, self.stderr)
+
+ if self.pidfile is not None:
+ self.pidfile.__enter__()
+
+ self._is_open = True
+
+ register_atexit_function(self.close)
+
+ def __enter__(self):
+ """ Context manager entry point. """
+ self.open()
+ return self
+
+ def close(self):
+ """ Exit the daemon process context.
+ :Return: ``None``
+
+ Close the daemon context. This performs the following steps:
+
+ * If this instance's `is_open` property is false, return
+ immediately. This makes it safe to call `close` multiple times
+ on an instance.
+
+ * If the `pidfile` attribute is not ``None``, exit its context
+ manager.
+
+ * Mark this instance as closed (for the purpose of future `open`
+ and `close` calls).
+
+ """
+ if not self.is_open:
+ return
+
+ if self.pidfile is not None:
+ # Follow the interface for telling a context manager to exit,
+ # <URL:http://docs.python.org/library/stdtypes.html#typecontextmanager>.
+ self.pidfile.__exit__(None, None, None)
+
+ self._is_open = False
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ """ Context manager exit point. """
+ self.close()
+
+ def terminate(self, signal_number, stack_frame):
+ """ Signal handler for end-process signals.
+ :Return: ``None``
+
+ Signal handler for the ``signal.SIGTERM`` signal. Performs the
+ following step:
+
+ * Raise a ``SystemExit`` exception explaining the signal.
+
+ """
+ exception = SystemExit(
+ "Terminating on signal %(signal_number)r"
+ % vars())
+ raise exception
+
+ def _get_exclude_file_descriptors(self):
+ """ Return the set of file descriptors to exclude closing.
+
+ Returns a set containing the file descriptors for the
+ items in `files_preserve`, and also each of `stdin`,
+ `stdout`, and `stderr`:
+
+ * If the item is ``None``, it is omitted from the return
+ set.
+
+ * If the item has a ``fileno()`` method, that method's
+ return value is in the return set.
+
+ * Otherwise, the item is in the return set verbatim.
+
+ """
+ files_preserve = self.files_preserve
+ if files_preserve is None:
+ files_preserve = []
+ files_preserve.extend(
+ item for item in [self.stdin, self.stdout, self.stderr]
+ if hasattr(item, 'fileno'))
+ exclude_descriptors = set()
+ for item in files_preserve:
+ if item is None:
+ continue
+ if hasattr(item, 'fileno'):
+ exclude_descriptors.add(item.fileno())
+ else:
+ exclude_descriptors.add(item)
+ return exclude_descriptors
+
+ def _make_signal_handler(self, target):
+ """ Make the signal handler for a specified target object.
+
+ If `target` is ``None``, returns ``signal.SIG_IGN``. If
+ `target` is a string, returns the attribute of this
+ instance named by that string. Otherwise, returns `target`
+ itself.
+
+ """
+ if target is None:
+ result = signal.SIG_IGN
+ elif isinstance(target, basestring):
+ name = target
+ result = getattr(self, name)
+ else:
+ result = target
+
+ return result
+
+ def _make_signal_handler_map(self):
+ """ Make the map from signals to handlers for this instance.
+
+ Constructs a map from signal numbers to handlers for this
+ context instance, suitable for passing to
+ `set_signal_handlers`.
+
+ """
+ signal_handler_map = dict(
+ (signal_number, self._make_signal_handler(target))
+ for (signal_number, target) in self.signal_map.items())
+ return signal_handler_map
+
+
+def change_working_directory(directory):
+ """ Change the working directory of this process.
+ """
+ try:
+ os.chdir(directory)
+ except Exception, exc:
+ error = DaemonOSEnvironmentError(
+ "Unable to change working directory (%(exc)s)"
+ % vars())
+ raise error
+
+
+def change_root_directory(directory):
+ """ Change the root directory of this process.
+
+ Sets the current working directory, then the process root
+ directory, to the specified `directory`. Requires appropriate
+ OS privileges for this process.
+
+ """
+ try:
+ os.chdir(directory)
+ os.chroot(directory)
+ except Exception, exc:
+ error = DaemonOSEnvironmentError(
+ "Unable to change root directory (%(exc)s)"
+ % vars())
+ raise error
+
+
+def change_file_creation_mask(mask):
+ """ Change the file creation mask for this process.
+ """
+ try:
+ os.umask(mask)
+ except Exception, exc:
+ error = DaemonOSEnvironmentError(
+ "Unable to change file creation mask (%(exc)s)"
+ % vars())
+ raise error
+
+
+def change_process_owner(uid, gid):
+ """ Change the owning UID and GID of this process.
+
+ Sets the GID then the UID of the process (in that order, to
+ avoid permission errors) to the specified `gid` and `uid`
+ values. Requires appropriate OS privileges for this process.
+
+ """
+ try:
+ os.setgid(gid)
+ os.setuid(uid)
+ except Exception, exc:
+ error = DaemonOSEnvironmentError(
+ "Unable to change file creation mask (%(exc)s)"
+ % vars())
+ raise error
+
+
+def prevent_core_dump():
+ """ Prevent this process from generating a core dump.
+
+ Sets the soft and hard limits for core dump size to zero. On
+ Unix, this prevents the process from creating core dump
+ altogether.
+
+ """
+ core_resource = resource.RLIMIT_CORE
+
+ try:
+ # Ensure the resource limit exists on this platform, by requesting
+ # its current value
+ core_limit_prev = resource.getrlimit(core_resource)
+ except ValueError, exc:
+ error = DaemonOSEnvironmentError(
+ "System does not support RLIMIT_CORE resource limit (%(exc)s)"
+ % vars())
+ raise error
+
+ # Set hard and soft limits to zero, i.e. no core dump at all
+ core_limit = (0, 0)
+ resource.setrlimit(core_resource, core_limit)
+
+
+def detach_process_context():
+ """ Detach the process context from parent and session.
+
+ Detach from the parent process and session group, allowing the
+ parent to exit while this process continues running.
+
+ Reference: “Advanced Programming in the Unix Environment”,
+ section 13.3, by W. Richard Stevens, published 1993 by
+ Addison-Wesley.
+
+ """
+
+ def fork_then_exit_parent(error_message):
+ """ Fork a child process, then exit the parent process.
+
+ If the fork fails, raise a ``DaemonProcessDetachError``
+ with ``error_message``.
+
+ """
+ try:
+ pid = os.fork()
+ if pid > 0:
+ os._exit(0)
+ except OSError, exc:
+ exc_errno = exc.errno
+ exc_strerror = exc.strerror
+ error = DaemonProcessDetachError(
+ "%(error_message)s: [%(exc_errno)d] %(exc_strerror)s" % vars())
+ raise error
+
+ fork_then_exit_parent(error_message="Failed first fork")
+ os.setsid()
+ fork_then_exit_parent(error_message="Failed second fork")
+
+
+def is_process_started_by_init():
+ """ Determine if the current process is started by `init`.
+
+ The `init` process has the process ID of 1; if that is our
+ parent process ID, return ``True``, otherwise ``False``.
+
+ """
+ result = False
+
+ init_pid = 1
+ if os.getppid() == init_pid:
+ result = True
+
+ return result
+
+
+def is_socket(fd):
+ """ Determine if the file descriptor is a socket.
+
+ Return ``False`` if querying the socket type of `fd` raises an
+ error; otherwise return ``True``.
+
+ """
+ result = False
+
+ file_socket = socket.fromfd(fd, socket.AF_INET, socket.SOCK_RAW)
+
+ try:
+ socket_type = file_socket.getsockopt(
+ socket.SOL_SOCKET, socket.SO_TYPE)
+ except socket.error, exc:
+ exc_errno = exc.args[0]
+ if exc_errno == errno.ENOTSOCK:
+ # Socket operation on non-socket
+ pass
+ else:
+ # Some other socket error
+ result = True
+ else:
+ # No error getting socket type
+ result = True
+
+ return result
+
+
+def is_process_started_by_superserver():
+ """ Determine if the current process is started by the superserver.
+
+ The internet superserver creates a network socket, and
+ attaches it to the standard streams of the child process. If
+ that is the case for this process, return ``True``, otherwise
+ ``False``.
+
+ """
+ result = False
+
+ stdin_fd = sys.__stdin__.fileno()
+ if is_socket(stdin_fd):
+ result = True
+
+ return result
+
+
+def is_detach_process_context_required():
+ """ Determine whether detaching process context is required.
+
+ Return ``True`` if the process environment indicates the
+ process is already detached:
+
+ * Process was started by `init`; or
+
+ * Process was started by `inetd`.
+
+ """
+ result = True
+ if is_process_started_by_init() or is_process_started_by_superserver():
+ result = False
+
+ return result
+
+
+def close_file_descriptor_if_open(fd):
+ """ Close a file descriptor if already open.
+
+ Close the file descriptor `fd`, suppressing an error in the
+ case the file was not open.
+
+ """
+ try:
+ os.close(fd)
+ except OSError, exc:
+ if exc.errno == errno.EBADF:
+ # File descriptor was not open
+ pass
+ else:
+ error = DaemonOSEnvironmentError(
+ "Failed to close file descriptor %(fd)d"
+ " (%(exc)s)"
+ % vars())
+ raise error
+
+
+MAXFD = 2048
+
+def get_maximum_file_descriptors():
+ """ Return the maximum number of open file descriptors for this process.
+
+ Return the process hard resource limit of maximum number of
+ open file descriptors. If the limit is “infinity”, a default
+ value of ``MAXFD`` is returned.
+
+ """
+ limits = resource.getrlimit(resource.RLIMIT_NOFILE)
+ result = limits[1]
+ if result == resource.RLIM_INFINITY:
+ result = MAXFD
+ return result
+
+
+def close_all_open_files(exclude=set()):
+ """ Close all open file descriptors.
+
+ Closes every file descriptor (if open) of this process. If
+ specified, `exclude` is a set of file descriptors to *not*
+ close.
+
+ """
+ maxfd = get_maximum_file_descriptors()
+ for fd in reversed(range(maxfd)):
+ if fd not in exclude:
+ close_file_descriptor_if_open(fd)
+
+
+def redirect_stream(system_stream, target_stream):
+ """ Redirect a system stream to a specified file.
+
+ `system_stream` is a standard system stream such as
+ ``sys.stdout``. `target_stream` is an open file object that
+ should replace the corresponding system stream object.
+
+ If `target_stream` is ``None``, defaults to opening the
+ operating system's null device and using its file descriptor.
+
+ """
+ if target_stream is None:
+ target_fd = os.open(os.devnull, os.O_RDWR)
+ else:
+ target_fd = target_stream.fileno()
+ os.dup2(target_fd, system_stream.fileno())
+
+
+def make_default_signal_map():
+ """ Make the default signal map for this system.
+
+ The signals available differ by system. The map will not
+ contain any signals not defined on the running system.
+
+ """
+ name_map = {
+ 'SIGTSTP': None,
+ 'SIGTTIN': None,
+ 'SIGTTOU': None,
+ 'SIGTERM': 'terminate',
+ }
+ signal_map = dict(
+ (getattr(signal, name), target)
+ for (name, target) in name_map.items()
+ if hasattr(signal, name))
+
+ return signal_map
+
+
+def set_signal_handlers(signal_handler_map):
+ """ Set the signal handlers as specified.
+
+ The `signal_handler_map` argument is a map from signal number
+ to signal handler. See the `signal` module for details.
+
+ """
+ for (signal_number, handler) in signal_handler_map.items():
+ signal.signal(signal_number, handler)
+
+
+def register_atexit_function(func):
+ """ Register a function for processing at program exit.
+
+ The function `func` is registered for a call with no arguments
+ at program exit.
+
+ """
+ atexit.register(func)
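
The `DaemonContext` docstring above describes how `files_preserve` and the `stdin`/`stdout`/`stderr` options interact with the closing of file descriptors on daemon start. A hedged sketch under assumed file paths (the logger name and log file names are placeholders for illustration): a logging handler's stream is kept open via `files_preserve`, while the standard output streams are re-bound to a separate open file::

    import logging

    import daemon

    logger = logging.getLogger("example")                     # assumed name
    handler = logging.FileHandler("/var/log/example.log")     # assumed path
    logger.addHandler(handler)

    console = open("/var/log/example-console.log", "w+")      # assumed path

    context = daemon.DaemonContext(
        # Keep the logging handler's file descriptor open when all other
        # descriptors are closed during daemon start.
        files_preserve=[handler.stream],
        # Re-bind sys.stdout and sys.stderr to this file; because the file
        # object has a fileno() method, its descriptor is also excluded
        # from closing, as the docstring above describes.
        stdout=console,
        stderr=console,
        )

    with context:
        logger.warning("daemon running")
        print("this line goes to the console log file")
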
diff --git a/daemon/pidlockfile.py b/daemon/pidlockfile.py
new file mode 100644
index 0000000..c38beae
--- /dev/null
+++ b/daemon/pidlockfile.py
@@ -0,0 +1,194 @@
+# -*- coding: utf-8 -*-
+
+# daemon/pidlockfile.py
+# Part of python-daemon, an implementation of PEP 3143.
+#
+# Copyright © 2008–2010 Ben Finney <ben+python@benfinney.id.au>
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Python Software Foundation License, version 2 or
+# later as published by the Python Software Foundation.
+# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
+
+""" Lockfile behaviour implemented via Unix PID files.
+ """
+
+import os
+import errno
+
+from lockfile import (
+ LinkFileLock,
+ AlreadyLocked, LockFailed,
+ NotLocked, NotMyLock,
+ )
+
+
+class PIDFileError(Exception):
+ """ Abstract base class for errors specific to PID files. """
+
+class PIDFileParseError(ValueError, PIDFileError):
+ """ Raised when parsing contents of PID file fails. """
+
+
+class PIDLockFile(LinkFileLock, object):
+ """ Lockfile implemented as a Unix PID file.
+
+ The PID file is named by the attribute `path`. When locked,
+ the file will be created with a single line of text,
+ containing the process ID (PID) of the process that acquired
+ the lock.
+
+ The lock is acquired and maintained as per `LinkFileLock`.
+
+ """
+
+ def read_pid(self):
+ """ Get the PID from the lock file.
+ """
+ result = read_pid_from_pidfile(self.path)
+ return result
+
+ def acquire(self, *args, **kwargs):
+ """ Acquire the lock.
+
+ Locks the PID file then creates the PID file for this
+ lock. The `timeout` parameter is used as for the
+ `LinkFileLock` class.
+
+ """
+ super(PIDLockFile, self).acquire(*args, **kwargs)
+ try:
+ write_pid_to_pidfile(self.path)
+ except OSError, exc:
+ error = LockFailed("%(exc)s" % vars())
+ raise error
+
+ def release(self):
+ """ Release the lock.
+
+ Removes the PID file then releases the lock, or raises an
+ error if the current process does not hold the lock.
+
+ """
+ if self.i_am_locking():
+ remove_existing_pidfile(self.path)
+ super(PIDLockFile, self).release()
+
+ def break_lock(self):
+ """ Break an existing lock.
+
+ If the lock is held, breaks the lock and removes the PID
+ file.
+
+ """
+ super(PIDLockFile, self).break_lock()
+ remove_existing_pidfile(self.path)
+
+
+class TimeoutPIDLockFile(PIDLockFile):
+ """ Lockfile with default timeout, implemented as a Unix PID file.
+
+ This uses the ``PIDLockFile`` implementation, with the
+ following changes:
+
+ * The `acquire_timeout` parameter to the initialiser will be
+ used as the default `timeout` parameter for the `acquire`
+ method.
+
+ """
+
+ def __init__(self, path, acquire_timeout=None, *args, **kwargs):
+ """ Set up the parameters of a DaemonRunnerLock. """
+ self.acquire_timeout = acquire_timeout
+ super(TimeoutPIDLockFile, self).__init__(path, *args, **kwargs)
+
+ def acquire(self, timeout=None, *args, **kwargs):
+ """ Acquire the lock. """
+ if timeout is None:
+ timeout = self.acquire_timeout
+ super(TimeoutPIDLockFile, self).acquire(timeout, *args, **kwargs)
+
+
+def read_pid_from_pidfile(pidfile_path):
+ """ Read the PID recorded in the named PID file.
+
+ Read and return the numeric PID recorded as text in the named
+ PID file. If the PID file does not exist, return ``None``. If
+ the content is not a valid PID, raise ``PIDFileParseError``.
+
+ """
+ pid = None
+ pidfile = None
+ try:
+ pidfile = open(pidfile_path, 'r')
+ except IOError, exc:
+ if exc.errno == errno.ENOENT:
+ pass
+ else:
+ raise
+
+ if pidfile:
+ # According to the FHS 2.3 section on PID files in ‘/var/run’:
+ #
+ # The file must consist of the process identifier in
+ # ASCII-encoded decimal, followed by a newline character. …
+ #
+ # Programs that read PID files should be somewhat flexible
+ # in what they accept; i.e., they should ignore extra
+ # whitespace, leading zeroes, absence of the trailing
+ # newline, or additional lines in the PID file.
+
+ line = pidfile.readline().strip()
+ try:
+ pid = int(line)
+ except ValueError:
+ raise PIDFileParseError(
+ "PID file %(pidfile_path)r contents invalid" % vars())
+ pidfile.close()
+
+ return pid
+
+
+def write_pid_to_pidfile(pidfile_path):
+ """ Write the PID in the named PID file.
+
+ Get the numeric process ID (“PID”) of the current process
+ and write it to the named file as a line of text.
+
+ """
+ open_flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY)
+    # Build mode 0644 (owner read/write; group and others read-only) from
+    # the access-test constants: os.R_OK == 4, os.W_OK == 2.
+    open_mode = (
+        ((os.R_OK | os.W_OK) << 6) |
+        ((os.R_OK) << 3) |
+        ((os.R_OK)))
+ pidfile_fd = os.open(pidfile_path, open_flags, open_mode)
+ pidfile = os.fdopen(pidfile_fd, 'w')
+
+ # According to the FHS 2.3 section on PID files in ‘/var/run’:
+ #
+ # The file must consist of the process identifier in
+ # ASCII-encoded decimal, followed by a newline character. For
+ # example, if crond was process number 25, /var/run/crond.pid
+ # would contain three characters: two, five, and newline.
+
+ pid = os.getpid()
+ line = "%(pid)d\n" % vars()
+ pidfile.write(line)
+ pidfile.close()
+
+
+def remove_existing_pidfile(pidfile_path):
+ """ Remove the named PID file if it exists.
+
+ Remove the named PID file. Ignore the condition if the file
+ does not exist, since that only means we are already in the
+ desired state.
+
+ """
+ try:
+ os.remove(pidfile_path)
+ except OSError, exc:
+ if exc.errno == errno.ENOENT:
+ pass
+ else:
+ raise
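
A minimal usage sketch for the lock classes above, assuming the installed package is importable; the PID file path and timeout value are placeholders::

    from daemon.pidlockfile import TimeoutPIDLockFile

    pidfile = TimeoutPIDLockFile('/var/run/example.pid', acquire_timeout=5)
    pidfile.acquire()              # lock the file, then record this process's PID
    try:
        print pidfile.read_pid()   # the PID recorded in the file
    finally:
        pidfile.release()          # remove the PID file, then release the lock
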
diff --git a/daemon/runner.py b/daemon/runner.py
new file mode 100644
index 0000000..0642695
--- /dev/null
+++ b/daemon/runner.py
@@ -0,0 +1,229 @@
+# -*- coding: utf-8 -*-
+
+# daemon/runner.py
+# Part of python-daemon, an implementation of PEP 3143.
+#
+# Copyright © 2009–2010 Ben Finney <ben+python@benfinney.id.au>
+# Copyright © 2007–2008 Robert Niederreiter, Jens Klein
+# Copyright © 2003 Clark Evans
+# Copyright © 2002 Noah Spurrier
+# Copyright © 2001 Jürgen Hermann
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Python Software Foundation License, version 2 or
+# later as published by the Python Software Foundation.
+# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
+
+""" Daemon runner library.
+ """
+
+import sys
+import os
+import signal
+import errno
+
+import pidlockfile
+
+from daemon import DaemonContext
+
+
+class DaemonRunnerError(Exception):
+ """ Abstract base class for errors from DaemonRunner. """
+
+class DaemonRunnerInvalidActionError(ValueError, DaemonRunnerError):
+ """ Raised when specified action for DaemonRunner is invalid. """
+
+class DaemonRunnerStartFailureError(RuntimeError, DaemonRunnerError):
+ """ Raised when failure starting DaemonRunner. """
+
+class DaemonRunnerStopFailureError(RuntimeError, DaemonRunnerError):
+ """ Raised when failure stopping DaemonRunner. """
+
+
+class DaemonRunner(object):
+ """ Controller for a callable running in a separate background process.
+
+ The first command-line argument is the action to take:
+
+ * 'start': Become a daemon and call `app.run()`.
+ * 'stop': Exit the daemon process specified in the PID file.
+ * 'restart': Stop, then start.
+
+ """
+
+ start_message = "started with pid %(pid)d"
+
+ def __init__(self, app):
+ """ Set up the parameters of a new runner.
+
+ The `app` argument must have the following attributes:
+
+ * `stdin_path`, `stdout_path`, `stderr_path`: Filesystem
+ paths to open and replace the existing `sys.stdin`,
+ `sys.stdout`, `sys.stderr`.
+
+ * `pidfile_path`: Absolute filesystem path to a file that
+ will be used as the PID file for the daemon. If
+ ``None``, no PID file will be used.
+
+ * `pidfile_timeout`: Used as the default acquisition
+ timeout value supplied to the runner's PID lock file.
+
+ * `run`: Callable that will be invoked when the daemon is
+ started.
+
+ """
+ self.parse_args()
+ self.app = app
+ self.daemon_context = DaemonContext()
+ self.daemon_context.stdin = open(app.stdin_path, 'r')
+ self.daemon_context.stdout = open(app.stdout_path, 'w+')
+ self.daemon_context.stderr = open(
+ app.stderr_path, 'w+', buffering=0)
+
+ self.pidfile = None
+ if app.pidfile_path is not None:
+ self.pidfile = make_pidlockfile(
+ app.pidfile_path, app.pidfile_timeout)
+ self.daemon_context.pidfile = self.pidfile
+
+ def _usage_exit(self, argv):
+ """ Emit a usage message, then exit.
+ """
+ progname = os.path.basename(argv[0])
+ usage_exit_code = 2
+ action_usage = "|".join(self.action_funcs.keys())
+ message = "usage: %(progname)s %(action_usage)s" % vars()
+ emit_message(message)
+ sys.exit(usage_exit_code)
+
+ def parse_args(self, argv=None):
+ """ Parse command-line arguments.
+ """
+ if argv is None:
+ argv = sys.argv
+
+ min_args = 2
+ if len(argv) < min_args:
+ self._usage_exit(argv)
+
+ self.action = argv[1]
+ if self.action not in self.action_funcs:
+ self._usage_exit(argv)
+
+ def _start(self):
+ """ Open the daemon context and run the application.
+ """
+ if is_pidfile_stale(self.pidfile):
+ self.pidfile.break_lock()
+
+ try:
+ self.daemon_context.open()
+ except pidlockfile.AlreadyLocked:
+ pidfile_path = self.pidfile.path
+ raise DaemonRunnerStartFailureError(
+ "PID file %(pidfile_path)r already locked" % vars())
+
+ pid = os.getpid()
+ message = self.start_message % vars()
+ emit_message(message)
+
+ self.app.run()
+
+ def _terminate_daemon_process(self):
+ """ Terminate the daemon process specified in the current PID file.
+ """
+ pid = self.pidfile.read_pid()
+ try:
+ os.kill(pid, signal.SIGTERM)
+ except OSError, exc:
+ raise DaemonRunnerStopFailureError(
+ "Failed to terminate %(pid)d: %(exc)s" % vars())
+
+ def _stop(self):
+ """ Exit the daemon process specified in the current PID file.
+ """
+ if not self.pidfile.is_locked():
+ pidfile_path = self.pidfile.path
+ raise DaemonRunnerStopFailureError(
+ "PID file %(pidfile_path)r not locked" % vars())
+
+ if is_pidfile_stale(self.pidfile):
+ self.pidfile.break_lock()
+ else:
+ self._terminate_daemon_process()
+
+ def _restart(self):
+ """ Stop, then start.
+ """
+ self._stop()
+ self._start()
+
+ action_funcs = {
+ 'start': _start,
+ 'stop': _stop,
+ 'restart': _restart,
+ }
+
+ def _get_action_func(self):
+ """ Return the function for the specified action.
+
+ Raises ``DaemonRunnerInvalidActionError`` if the action is
+ unknown.
+
+ """
+ try:
+ func = self.action_funcs[self.action]
+ except KeyError:
+ raise DaemonRunnerInvalidActionError(
+ "Unknown action: %(action)r" % vars(self))
+ return func
+
+ def do_action(self):
+ """ Perform the requested action.
+ """
+ func = self._get_action_func()
+ func(self)
+
+
+def emit_message(message, stream=None):
+ """ Emit a message to the specified stream (default `sys.stderr`). """
+ if stream is None:
+ stream = sys.stderr
+ stream.write("%(message)s\n" % vars())
+ stream.flush()
+
+
+def make_pidlockfile(path, acquire_timeout):
+ """ Make a PIDLockFile instance with the given filesystem path. """
+ if not isinstance(path, basestring):
+ error = ValueError("Not a filesystem path: %(path)r" % vars())
+ raise error
+ if not os.path.isabs(path):
+ error = ValueError("Not an absolute path: %(path)r" % vars())
+ raise error
+ lockfile = pidlockfile.TimeoutPIDLockFile(path, acquire_timeout)
+
+ return lockfile
+
+
+def is_pidfile_stale(pidfile):
+ """ Determine whether a PID file is stale.
+
+ Return ``True`` (“stale”) if the contents of the PID file are
+ valid but do not match the PID of a currently-running process;
+ otherwise return ``False``.
+
+ """
+ result = False
+
+ pidfile_pid = pidfile.read_pid()
+ if pidfile_pid is not None:
+        try:
+            # ``signal.SIG_DFL`` has the numeric value 0; "sending" signal 0
+            # performs error checking only, without delivering any signal,
+            # so an ESRCH error means no such process exists.
+            os.kill(pidfile_pid, signal.SIG_DFL)
+ except OSError, exc:
+ if exc.errno == errno.ESRCH:
+ # The specified PID does not exist
+ result = True
+
+ return result
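
A sketch of a minimal application object satisfying the attributes documented in `DaemonRunner.__init__` above; the class name and filesystem paths are placeholders::

    import time

    from daemon.runner import DaemonRunner

    class ExampleApp(object):
        stdin_path = '/dev/null'
        stdout_path = '/dev/null'
        stderr_path = '/dev/null'
        pidfile_path = '/var/run/example-app.pid'   # must be an absolute path
        pidfile_timeout = 5

        def run(self):
            while True:
                time.sleep(60)

    if __name__ == '__main__':
        # The action ('start', 'stop' or 'restart') is read from sys.argv.
        runner = DaemonRunner(ExampleApp())
        runner.do_action()
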
diff --git a/daemon/version/__init__.py b/daemon/version/__init__.py
new file mode 100644
index 0000000..d2eafa6
--- /dev/null
+++ b/daemon/version/__init__.py
@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+
+# daemon/version/__init__.py
+# Part of python-daemon, an implementation of PEP 3143.
+#
+# Copyright © 2008–2010 Ben Finney <ben+python@benfinney.id.au>
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Python Software Foundation License, version 2 or
+# later as published by the Python Software Foundation.
+# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
+
+""" Version information for the python-daemon distribution. """
+
+from version_info import version_info
+
+version_info['version_string'] = u"1.5.5"
+
+version_short = u"%(version_string)s" % version_info
+version_full = u"%(version_string)s.r%(revno)s" % version_info
+version = version_short
+
+author_name = u"Ben Finney"
+author_email = u"ben+python@benfinney.id.au"
+author = u"%(author_name)s <%(author_email)s>" % vars()
+
+copyright_year_begin = u"2001"
+date = version_info['date'].split(' ', 1)[0]
+copyright_year = date.split('-')[0]
+copyright_year_range = copyright_year_begin
+if copyright_year > copyright_year_begin:
+ copyright_year_range += u"–%(copyright_year)s" % vars()
+
+copyright = (
+ u"Copyright © %(copyright_year_range)s %(author)s and others"
+ ) % vars()
+license = u"PSF-2+"
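
Given the values in daemon/version/version_info.py below, the derived strings work out as follows (a worked example, not part of the source)::

    version_short == u"1.5.5"
    version_full == u"1.5.5.r145"          # 'revno' is 145
    copyright_year_range == u"2001–2009"   # 'date' begins with "2009-05-22"
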
diff --git a/daemon/version/version_info.py b/daemon/version/version_info.py
new file mode 100644
index 0000000..cdbf280
--- /dev/null
+++ b/daemon/version/version_info.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+"""This file is automatically generated by generate_version_info
+It uses the current working tree to determine the revision.
+So don't edit it. :)
+"""
+
+version_info = {'branch_nick': u'python-daemon.devel',
+ 'build_date': '2009-05-22 19:50:06 +1000',
+ 'clean': None,
+ 'date': '2009-05-22 19:47:30 +1000',
+ 'revision_id': 'ben+python@benfinney.id.au-20090522094730-p4vsa0reh7ktt4e1',
+ 'revno': 145}
+
+revisions = {}
+
+file_revisions = {}
+
+
+
+if __name__ == '__main__':
+ print 'revision: %(revno)d' % version_info
+ print 'nick: %(branch_nick)s' % version_info
+ print 'revision id: %(revision_id)s' % version_info
diff --git a/python_daemon.egg-info/PKG-INFO b/python_daemon.egg-info/PKG-INFO
new file mode 100644
index 0000000..df8f553
--- /dev/null
+++ b/python_daemon.egg-info/PKG-INFO
@@ -0,0 +1,37 @@
+Metadata-Version: 1.0
+Name: python-daemon
+Version: 1.5.5
+Summary: Library to implement a well-behaved Unix daemon process.
+Home-page: http://pypi.python.org/pypi/python-daemon/
+Author: Ben Finney
+Author-email: ben+python@benfinney.id.au
+License: PSF-2+
+Description: This library implements the well-behaved daemon specification of
+ :pep:`3143`, "Standard daemon process library".
+
+ A well-behaved Unix daemon process is tricky to get right, but the
+ required steps are much the same for every daemon program. A
+ `DaemonContext` instance holds the behaviour and configured
+ process environment for the program; use the instance as a context
+ manager to enter a daemon state.
+
+ Simple example of usage::
+
+ import daemon
+
+ from spam import do_main_program
+
+ with daemon.DaemonContext():
+ do_main_program()
+
+ Customisation of the steps to become a daemon is available by
+ setting options on the `DaemonContext` instance; see the
+ documentation for that class for each option.
+Keywords: daemon,fork,unix
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: License :: OSI Approved :: Python Software Foundation License
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: Python
+Classifier: Intended Audience :: Developers
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
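
The description above notes that the daemonisation steps are customised by setting options on the `DaemonContext` instance; a hedged sketch with placeholder option values (the option names appear in the DaemonContext test cases later in this import)::

    import daemon

    from spam import do_main_program

    context = daemon.DaemonContext(
        working_directory='/var/lib/example',
        umask=0022,
        detach_process=True,
        )

    with context:
        do_main_program()
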
diff --git a/python_daemon.egg-info/SOURCES.txt b/python_daemon.egg-info/SOURCES.txt
new file mode 100644
index 0000000..ab2b523
--- /dev/null
+++ b/python_daemon.egg-info/SOURCES.txt
@@ -0,0 +1,22 @@
+ChangeLog
+LICENSE.GPL-2
+LICENSE.PSF-2
+MANIFEST.in
+setup.py
+daemon/__init__.py
+daemon/daemon.py
+daemon/pidlockfile.py
+daemon/runner.py
+daemon/version/__init__.py
+daemon/version/version_info.py
+python_daemon.egg-info/PKG-INFO
+python_daemon.egg-info/SOURCES.txt
+python_daemon.egg-info/dependency_links.txt
+python_daemon.egg-info/not-zip-safe
+python_daemon.egg-info/requires.txt
+python_daemon.egg-info/top_level.txt
+test/__init__.py
+test/scaffold.py
+test/test_daemon.py
+test/test_pidlockfile.py
+test/test_runner.py \ No newline at end of file
diff --git a/python_daemon.egg-info/dependency_links.txt b/python_daemon.egg-info/dependency_links.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/python_daemon.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/python_daemon.egg-info/not-zip-safe b/python_daemon.egg-info/not-zip-safe
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/python_daemon.egg-info/not-zip-safe
@@ -0,0 +1 @@
+
diff --git a/python_daemon.egg-info/requires.txt b/python_daemon.egg-info/requires.txt
new file mode 100644
index 0000000..1c7ae21
--- /dev/null
+++ b/python_daemon.egg-info/requires.txt
@@ -0,0 +1,2 @@
+setuptools
+lockfile >=0.7 \ No newline at end of file
diff --git a/python_daemon.egg-info/top_level.txt b/python_daemon.egg-info/top_level.txt
new file mode 100644
index 0000000..28e3ee0
--- /dev/null
+++ b/python_daemon.egg-info/top_level.txt
@@ -0,0 +1 @@
+daemon
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..861a9f5
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,5 @@
+[egg_info]
+tag_build =
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..8570c8a
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 -*-
+
+# setup.py
+# Part of python-daemon, an implementation of PEP 3143.
+#
+# Copyright © 2008–2010 Ben Finney <ben+python@benfinney.id.au>
+# Copyright © 2008 Robert Niederreiter, Jens Klein
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Python Software Foundation License, version 2 or
+# later as published by the Python Software Foundation.
+# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
+
+""" Distribution setup for python-daemon library.
+ """
+
+import textwrap
+from setuptools import setup, find_packages
+
+distribution_name = "python-daemon"
+main_module_name = 'daemon'
+main_module = __import__(main_module_name, fromlist=['version'])
+version = main_module.version
+
+short_description, long_description = (
+ textwrap.dedent(d).strip()
+ for d in main_module.__doc__.split(u'\n\n', 1)
+ )
+
+
+setup(
+ name=distribution_name,
+ version=version.version,
+ packages=find_packages(exclude=["test"]),
+
+ # setuptools metadata
+ zip_safe=False,
+ test_suite="test.suite",
+ tests_require=[
+ "MiniMock >=1.2.2",
+ ],
+ install_requires=[
+ "setuptools",
+ "lockfile >=0.7",
+ ],
+
+ # PyPI metadata
+ author=version.author_name,
+ author_email=version.author_email,
+ description=short_description,
+ license=version.license,
+ keywords=u"daemon fork unix".split(),
+ url=main_module._url,
+ long_description=long_description,
+ classifiers=[
+ # Reference: http://pypi.python.org/pypi?%3Aaction=list_classifiers
+ "Development Status :: 4 - Beta",
+ "License :: OSI Approved :: Python Software Foundation License",
+ "Operating System :: POSIX",
+ "Programming Language :: Python",
+ "Intended Audience :: Developers",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ ],
+ )
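
The description fields above are derived by splitting the `daemon` package docstring on its first blank line; a sketch using an illustrative docstring (not the actual `daemon/__init__.py` content)::

    import textwrap

    doc = u"Short summary line.\n\nLonger description, possibly\nspanning several lines.\n"
    short_description, long_description = (
        textwrap.dedent(d).strip()
        for d in doc.split(u'\n\n', 1))
    # short_description == u"Short summary line."
    # long_description == u"Longer description, possibly\nspanning several lines."
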
diff --git a/test/__init__.py b/test/__init__.py
new file mode 100644
index 0000000..b3efac7
--- /dev/null
+++ b/test/__init__.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+#
+# test/__init__.py
+# Part of python-daemon, an implementation of PEP 3143.
+#
+# Copyright © 2008–2010 Ben Finney <ben+python@benfinney.id.au>
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Python Software Foundation License, version 2 or
+# later as published by the Python Software Foundation.
+# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
+
+""" Unit test suite for daemon package.
+ """
+
+import scaffold
+
+
+suite = scaffold.make_suite()
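
The suite assembled above is the one named by ``test_suite="test.suite"`` in setup.py above; a sketch of running it directly, assuming the test dependency (MiniMock) is installed::

    import unittest

    import test

    unittest.TextTestRunner(verbosity=2).run(test.suite)
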
diff --git a/test/scaffold.py b/test/scaffold.py
new file mode 100644
index 0000000..566cfb9
--- /dev/null
+++ b/test/scaffold.py
@@ -0,0 +1,402 @@
+# -*- coding: utf-8 -*-
+
+# test/scaffold.py
+# Part of python-daemon, an implementation of PEP 3143.
+#
+# Copyright © 2007–2010 Ben Finney <ben+python@benfinney.id.au>
+# This is free software; you may copy, modify and/or distribute this work
+# under the terms of the GNU General Public License, version 2 or later.
+# No warranty expressed or implied. See the file LICENSE.GPL-2 for details.
+
+""" Scaffolding for unit test modules.
+ """
+
+import unittest
+import doctest
+import logging
+import os
+import sys
+import operator
+import textwrap
+from minimock import (
+ Mock,
+ TraceTracker as MockTracker,
+ mock,
+ restore as mock_restore,
+ )
+
+test_dir = os.path.dirname(os.path.abspath(__file__))
+parent_dir = os.path.dirname(test_dir)
+if not test_dir in sys.path:
+ sys.path.insert(1, test_dir)
+if not parent_dir in sys.path:
+ sys.path.insert(1, parent_dir)
+
+# Disable all but the most critical logging messages
+logging.disable(logging.CRITICAL)
+
+
+def get_python_module_names(file_list, file_suffix='.py'):
+ """ Return a list of module names from a filename list. """
+ module_names = [m[:m.rfind(file_suffix)] for m in file_list
+ if m.endswith(file_suffix)]
+ return module_names
+
+
+def get_test_module_names(module_list, module_prefix='test_'):
+ """ Return the list of module names that qualify as test modules. """
+ module_names = [m for m in module_list
+ if m.startswith(module_prefix)]
+ return module_names
+
+
+def make_suite(path=test_dir):
+ """ Create the test suite for the given path. """
+ loader = unittest.TestLoader()
+ python_module_names = get_python_module_names(os.listdir(path))
+ test_module_names = get_test_module_names(python_module_names)
+ suite = loader.loadTestsFromNames(test_module_names)
+
+ return suite
+
+
+def get_function_signature(func):
+ """ Get the function signature as a mapping of attributes. """
+ arg_count = func.func_code.co_argcount
+ arg_names = func.func_code.co_varnames[:arg_count]
+
+ arg_defaults = {}
+ func_defaults = ()
+ if func.func_defaults is not None:
+ func_defaults = func.func_defaults
+ for (name, value) in zip(arg_names[::-1], func_defaults[::-1]):
+ arg_defaults[name] = value
+
+ signature = {
+ 'name': func.__name__,
+ 'arg_count': arg_count,
+ 'arg_names': arg_names,
+ 'arg_defaults': arg_defaults,
+ }
+
+ non_pos_names = list(func.func_code.co_varnames[arg_count:])
+ COLLECTS_ARBITRARY_POSITIONAL_ARGS = 0x04
+ if func.func_code.co_flags & COLLECTS_ARBITRARY_POSITIONAL_ARGS:
+ signature['var_args'] = non_pos_names.pop(0)
+ COLLECTS_ARBITRARY_KEYWORD_ARGS = 0x08
+ if func.func_code.co_flags & COLLECTS_ARBITRARY_KEYWORD_ARGS:
+ signature['var_kw_args'] = non_pos_names.pop(0)
+
+ return signature
+
+
+def format_function_signature(func):
+ """ Format the function signature as printable text. """
+ signature = get_function_signature(func)
+
+ args_text = []
+ for arg_name in signature['arg_names']:
+ if arg_name in signature['arg_defaults']:
+ arg_default = signature['arg_defaults'][arg_name]
+ arg_text_template = "%(arg_name)s=%(arg_default)r"
+ else:
+ arg_text_template = "%(arg_name)s"
+ args_text.append(arg_text_template % vars())
+ if 'var_args' in signature:
+ args_text.append("*%(var_args)s" % signature)
+ if 'var_kw_args' in signature:
+ args_text.append("**%(var_kw_args)s" % signature)
+ signature_args_text = ", ".join(args_text)
+
+ func_name = signature['name']
+ signature_text = (
+ "%(func_name)s(%(signature_args_text)s)" % vars())
+
+ return signature_text
+
+
+class TestCase(unittest.TestCase):
+ """ Test case behaviour. """
+
+ def failUnlessRaises(self, exc_class, func, *args, **kwargs):
+ """ Fail unless the function call raises the expected exception.
+
+ Fail the test if an instance of the exception class
+ ``exc_class`` is not raised when calling ``func`` with the
+ arguments ``*args`` and ``**kwargs``.
+
+ """
+ try:
+ super(TestCase, self).failUnlessRaises(
+ exc_class, func, *args, **kwargs)
+ except self.failureException:
+ exc_class_name = exc_class.__name__
+ msg = (
+ "Exception %(exc_class_name)s not raised"
+ " for function call:"
+ " func=%(func)r args=%(args)r kwargs=%(kwargs)r"
+ ) % vars()
+ raise self.failureException(msg)
+
+ def failIfIs(self, first, second, msg=None):
+ """ Fail if the two objects are identical.
+
+ Fail the test if ``first`` and ``second`` are identical,
+ as determined by the ``is`` operator.
+
+ """
+ if first is second:
+ if msg is None:
+ msg = "%(first)r is %(second)r" % vars()
+ raise self.failureException(msg)
+
+ def failUnlessIs(self, first, second, msg=None):
+ """ Fail unless the two objects are identical.
+
+ Fail the test unless ``first`` and ``second`` are
+ identical, as determined by the ``is`` operator.
+
+ """
+ if first is not second:
+ if msg is None:
+ msg = "%(first)r is not %(second)r" % vars()
+ raise self.failureException(msg)
+
+ assertIs = failUnlessIs
+ assertNotIs = failIfIs
+
+ def failIfIn(self, first, second, msg=None):
+ """ Fail if the second object is in the first.
+
+ Fail the test if ``first`` contains ``second``, as
+ determined by the ``in`` operator.
+
+ """
+ if second in first:
+ if msg is None:
+ msg = "%(second)r is in %(first)r" % vars()
+ raise self.failureException(msg)
+
+ def failUnlessIn(self, first, second, msg=None):
+ """ Fail unless the second object is in the first.
+
+ Fail the test unless ``first`` contains ``second``, as
+ determined by the ``in`` operator.
+
+ """
+ if second not in first:
+ if msg is None:
+ msg = "%(second)r is not in %(first)r" % vars()
+ raise self.failureException(msg)
+
+ assertIn = failUnlessIn
+ assertNotIn = failIfIn
+
+ def failUnlessOutputCheckerMatch(self, want, got, msg=None):
+ """ Fail unless the specified string matches the expected.
+
+ Fail the test unless ``want`` matches ``got``, as
+ determined by a ``doctest.OutputChecker`` instance. This
+ is not an equality check, but a pattern match according to
+ the ``OutputChecker`` rules.
+
+ """
+ checker = doctest.OutputChecker()
+ want = textwrap.dedent(want)
+ source = ""
+ example = doctest.Example(source, want)
+ got = textwrap.dedent(got)
+ checker_optionflags = reduce(operator.or_, [
+ doctest.ELLIPSIS,
+ ])
+ if not checker.check_output(want, got, checker_optionflags):
+ if msg is None:
+ diff = checker.output_difference(
+ example, got, checker_optionflags)
+ msg = "\n".join([
+ "Output received did not match expected output",
+ "%(diff)s",
+ ]) % vars()
+ raise self.failureException(msg)
+
+ assertOutputCheckerMatch = failUnlessOutputCheckerMatch
+
+ def failUnlessMockCheckerMatch(self, want, tracker=None, msg=None):
+ """ Fail unless the mock tracker matches the wanted output.
+
+ Fail the test unless `want` matches the output tracked by
+            `tracker` (defaults to ``self.mock_tracker``). This is not
+ an equality check, but a pattern match according to the
+ ``minimock.MinimockOutputChecker`` rules.
+
+ """
+ if tracker is None:
+ tracker = self.mock_tracker
+ if not tracker.check(want):
+ if msg is None:
+ diff = tracker.diff(want)
+ msg = "\n".join([
+ "Output received did not match expected output",
+ "%(diff)s",
+ ]) % vars()
+ raise self.failureException(msg)
+
+ def failIfMockCheckerMatch(self, want, tracker=None, msg=None):
+ """ Fail if the mock tracker matches the specified output.
+
+ Fail the test if `want` matches the output tracked by
+            `tracker` (defaults to ``self.mock_tracker``). This is not
+ an equality check, but a pattern match according to the
+ ``minimock.MinimockOutputChecker`` rules.
+
+ """
+ if tracker is None:
+ tracker = self.mock_tracker
+ if tracker.check(want):
+ if msg is None:
+ diff = tracker.diff(want)
+ msg = "\n".join([
+ "Output received matched specified undesired output",
+ "%(diff)s",
+ ]) % vars()
+ raise self.failureException(msg)
+
+ assertMockCheckerMatch = failUnlessMockCheckerMatch
+ assertNotMockCheckerMatch = failIfMockCheckerMatch
+
+ def failIfIsInstance(self, obj, classes, msg=None):
+ """ Fail if the object is an instance of the specified classes.
+
+ Fail the test if the object ``obj`` is an instance of any
+ of ``classes``.
+
+ """
+ if isinstance(obj, classes):
+ if msg is None:
+ msg = (
+ "%(obj)r is an instance of one of %(classes)r"
+ ) % vars()
+ raise self.failureException(msg)
+
+ def failUnlessIsInstance(self, obj, classes, msg=None):
+ """ Fail unless the object is an instance of the specified classes.
+
+ Fail the test unless the object ``obj`` is an instance of
+ any of ``classes``.
+
+ """
+ if not isinstance(obj, classes):
+ if msg is None:
+ msg = (
+ "%(obj)r is not an instance of any of %(classes)r"
+ ) % vars()
+ raise self.failureException(msg)
+
+ assertIsInstance = failUnlessIsInstance
+ assertNotIsInstance = failIfIsInstance
+
+ def failUnlessFunctionInTraceback(self, traceback, function, msg=None):
+ """ Fail if the function is not in the traceback.
+
+ Fail the test if the function ``function`` is not at any
+ of the levels in the traceback object ``traceback``.
+
+ """
+ func_in_traceback = False
+ expect_code = function.func_code
+ current_traceback = traceback
+ while current_traceback is not None:
+ if expect_code is current_traceback.tb_frame.f_code:
+ func_in_traceback = True
+ break
+ current_traceback = current_traceback.tb_next
+
+ if not func_in_traceback:
+ if msg is None:
+ msg = (
+ "Traceback did not lead to original function"
+ " %(function)s"
+ ) % vars()
+ raise self.failureException(msg)
+
+ assertFunctionInTraceback = failUnlessFunctionInTraceback
+
+ def failUnlessFunctionSignatureMatch(self, first, second, msg=None):
+ """ Fail if the function signatures do not match.
+
+ Fail the test if the function signature does not match
+ between the ``first`` function and the ``second``
+ function.
+
+ The function signature includes:
+
+ * function name,
+
+ * count of named parameters,
+
+ * sequence of named parameters,
+
+ * default values of named parameters,
+
+ * collector for arbitrary positional arguments,
+
+ * collector for arbitrary keyword arguments.
+
+ """
+ first_signature = get_function_signature(first)
+ second_signature = get_function_signature(second)
+
+ if first_signature != second_signature:
+ if msg is None:
+ first_signature_text = format_function_signature(first)
+ second_signature_text = format_function_signature(second)
+ msg = (textwrap.dedent("""\
+ Function signatures do not match:
+ %(first_signature)r != %(second_signature)r
+ Expected:
+ %(first_signature_text)s
+ Got:
+ %(second_signature_text)s""")
+ ) % vars()
+ raise self.failureException(msg)
+
+ assertFunctionSignatureMatch = failUnlessFunctionSignatureMatch
+
+
+class Exception_TestCase(TestCase):
+ """ Test cases for exception classes. """
+
+ def __init__(self, *args, **kwargs):
+        """ Set up a new instance. """
+ self.valid_exceptions = NotImplemented
+ super(Exception_TestCase, self).__init__(*args, **kwargs)
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ for exc_type, params in self.valid_exceptions.items():
+ args = (None, ) * params['min_args']
+ params['args'] = args
+ instance = exc_type(*args)
+ params['instance'] = instance
+
+ super(Exception_TestCase, self).setUp()
+
+ def test_exception_instance(self):
+ """ Exception instance should be created. """
+ for params in self.valid_exceptions.values():
+ instance = params['instance']
+ self.failIfIs(None, instance)
+
+ def test_exception_types(self):
+ """ Exception instances should match expected types. """
+ for params in self.valid_exceptions.values():
+ instance = params['instance']
+ for match_type in params['types']:
+ match_type_name = match_type.__name__
+ fail_msg = (
+ "%(instance)r is not an instance of"
+ " %(match_type_name)s"
+ ) % vars()
+ self.failUnless(
+ isinstance(instance, match_type),
+ msg=fail_msg)
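
A worked example of what `get_function_signature` above returns for a simple function (the function itself is illustrative only)::

    def example(alpha, beta=3, *args, **kwargs):
        pass

    get_function_signature(example)
    # => {'name': 'example',
    #     'arg_count': 2,
    #     'arg_names': ('alpha', 'beta'),
    #     'arg_defaults': {'beta': 3},
    #     'var_args': 'args',
    #     'var_kw_args': 'kwargs'}
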
diff --git a/test/test_daemon.py b/test/test_daemon.py
new file mode 100644
index 0000000..c3f46e3
--- /dev/null
+++ b/test/test_daemon.py
@@ -0,0 +1,1937 @@
+# -*- coding: utf-8 -*-
+#
+# test/test_daemon.py
+# Part of python-daemon, an implementation of PEP 3143.
+#
+# Copyright © 2008–2010 Ben Finney <ben+python@benfinney.id.au>
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Python Software Foundation License, version 2 or
+# later as published by the Python Software Foundation.
+# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
+
+""" Unit test for daemon module.
+ """
+
+import os
+import sys
+import tempfile
+import resource
+import errno
+import signal
+import socket
+from types import ModuleType
+import atexit
+from StringIO import StringIO
+
+import scaffold
+from test_pidlockfile import (
+ FakeFileDescriptorStringIO,
+ setup_pidfile_fixtures,
+ )
+
+from daemon import pidlockfile
+import daemon
+
+
+class Exception_TestCase(scaffold.Exception_TestCase):
+ """ Test cases for module exception classes. """
+
+ def __init__(self, *args, **kwargs):
+ """ Set up a new instance. """
+ super(Exception_TestCase, self).__init__(*args, **kwargs)
+
+ self.valid_exceptions = {
+ daemon.daemon.DaemonError: dict(
+ min_args = 1,
+ types = (Exception,),
+ ),
+ daemon.daemon.DaemonOSEnvironmentError: dict(
+ min_args = 1,
+ types = (daemon.daemon.DaemonError, OSError),
+ ),
+ daemon.daemon.DaemonProcessDetachError: dict(
+ min_args = 1,
+ types = (daemon.daemon.DaemonError, OSError),
+ ),
+ }
+
+
+def setup_daemon_context_fixtures(testcase):
+ """ Set up common test fixtures for DaemonContext test case. """
+ testcase.mock_tracker = scaffold.MockTracker()
+
+ setup_streams_fixtures(testcase)
+
+ setup_pidfile_fixtures(testcase)
+
+ testcase.mock_pidfile_path = tempfile.mktemp()
+ testcase.mock_pidlockfile = scaffold.Mock(
+ "pidlockfile.PIDLockFile",
+ tracker=testcase.mock_tracker)
+ testcase.mock_pidlockfile.path = testcase.mock_pidfile_path
+
+ scaffold.mock(
+ "daemon.daemon.is_detach_process_context_required",
+ returns=True,
+ tracker=testcase.mock_tracker)
+ scaffold.mock(
+ "daemon.daemon.make_default_signal_map",
+ returns=object(),
+ tracker=testcase.mock_tracker)
+
+ scaffold.mock(
+ "os.getuid",
+ returns=object(),
+ tracker=testcase.mock_tracker)
+ scaffold.mock(
+ "os.getgid",
+ returns=object(),
+ tracker=testcase.mock_tracker)
+
+ testcase.daemon_context_args = dict(
+ stdin = testcase.stream_files_by_name['stdin'],
+ stdout = testcase.stream_files_by_name['stdout'],
+ stderr = testcase.stream_files_by_name['stderr'],
+ )
+ testcase.test_instance = daemon.DaemonContext(
+ **testcase.daemon_context_args)
+
+
+class DaemonContext_TestCase(scaffold.TestCase):
+ """ Test cases for DaemonContext class. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ setup_daemon_context_fixtures(self)
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_instantiate(self):
+ """ New instance of DaemonContext should be created. """
+ self.failUnlessIsInstance(
+ self.test_instance, daemon.daemon.DaemonContext)
+
+ def test_minimum_zero_arguments(self):
+ """ Initialiser should not require any arguments. """
+ instance = daemon.daemon.DaemonContext()
+ self.failIfIs(None, instance)
+
+ def test_has_specified_chroot_directory(self):
+ """ Should have specified chroot_directory option. """
+ args = dict(
+ chroot_directory = object(),
+ )
+ expect_directory = args['chroot_directory']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.failUnlessEqual(expect_directory, instance.chroot_directory)
+
+ def test_has_specified_working_directory(self):
+ """ Should have specified working_directory option. """
+ args = dict(
+ working_directory = object(),
+ )
+ expect_directory = args['working_directory']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.failUnlessEqual(expect_directory, instance.working_directory)
+
+ def test_has_default_working_directory(self):
+ """ Should have default working_directory option. """
+ args = dict()
+ expect_directory = '/'
+ instance = daemon.daemon.DaemonContext(**args)
+ self.failUnlessEqual(expect_directory, instance.working_directory)
+
+ def test_has_specified_creation_mask(self):
+ """ Should have specified umask option. """
+ args = dict(
+ umask = object(),
+ )
+ expect_mask = args['umask']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.failUnlessEqual(expect_mask, instance.umask)
+
+ def test_has_default_creation_mask(self):
+ """ Should have default umask option. """
+ args = dict()
+ expect_mask = 0
+ instance = daemon.daemon.DaemonContext(**args)
+ self.failUnlessEqual(expect_mask, instance.umask)
+
+ def test_has_specified_uid(self):
+ """ Should have specified uid option. """
+ args = dict(
+ uid = object(),
+ )
+ expect_id = args['uid']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.failUnlessEqual(expect_id, instance.uid)
+
+ def test_has_derived_uid(self):
+ """ Should have uid option derived from process. """
+ args = dict()
+ expect_id = os.getuid()
+ instance = daemon.daemon.DaemonContext(**args)
+ self.failUnlessEqual(expect_id, instance.uid)
+
+ def test_has_specified_gid(self):
+ """ Should have specified gid option. """
+ args = dict(
+ gid = object(),
+ )
+ expect_id = args['gid']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.failUnlessEqual(expect_id, instance.gid)
+
+ def test_has_derived_gid(self):
+ """ Should have gid option derived from process. """
+ args = dict()
+ expect_id = os.getgid()
+ instance = daemon.daemon.DaemonContext(**args)
+ self.failUnlessEqual(expect_id, instance.gid)
+
+ def test_has_specified_detach_process(self):
+ """ Should have specified detach_process option. """
+ args = dict(
+ detach_process = object(),
+ )
+ expect_value = args['detach_process']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.failUnlessEqual(expect_value, instance.detach_process)
+
+ def test_has_derived_detach_process(self):
+ """ Should have detach_process option derived from environment. """
+ args = dict()
+ func = daemon.daemon.is_detach_process_context_required
+ expect_value = func()
+ instance = daemon.daemon.DaemonContext(**args)
+ self.failUnlessEqual(expect_value, instance.detach_process)
+
+ def test_has_specified_files_preserve(self):
+ """ Should have specified files_preserve option. """
+ args = dict(
+ files_preserve = object(),
+ )
+ expect_files_preserve = args['files_preserve']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.failUnlessEqual(expect_files_preserve, instance.files_preserve)
+
+ def test_has_specified_pidfile(self):
+ """ Should have the specified pidfile. """
+ args = dict(
+ pidfile = object(),
+ )
+ expect_pidfile = args['pidfile']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.failUnlessEqual(expect_pidfile, instance.pidfile)
+
+ def test_has_specified_stdin(self):
+ """ Should have specified stdin option. """
+ args = dict(
+ stdin = object(),
+ )
+ expect_file = args['stdin']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.failUnlessEqual(expect_file, instance.stdin)
+
+ def test_has_specified_stdout(self):
+ """ Should have specified stdout option. """
+ args = dict(
+ stdout = object(),
+ )
+ expect_file = args['stdout']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.failUnlessEqual(expect_file, instance.stdout)
+
+ def test_has_specified_stderr(self):
+ """ Should have specified stderr option. """
+ args = dict(
+ stderr = object(),
+ )
+ expect_file = args['stderr']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.failUnlessEqual(expect_file, instance.stderr)
+
+ def test_has_specified_signal_map(self):
+ """ Should have specified signal_map option. """
+ args = dict(
+ signal_map = object(),
+ )
+ expect_signal_map = args['signal_map']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.failUnlessEqual(expect_signal_map, instance.signal_map)
+
+ def test_has_derived_signal_map(self):
+ """ Should have signal_map option derived from system. """
+ args = dict()
+ expect_signal_map = daemon.daemon.make_default_signal_map()
+ instance = daemon.daemon.DaemonContext(**args)
+ self.failUnlessEqual(expect_signal_map, instance.signal_map)
+
+
+class DaemonContext_is_open_TestCase(scaffold.TestCase):
+ """ Test cases for DaemonContext.is_open property. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ setup_daemon_context_fixtures(self)
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_begin_false(self):
+ """ Initial value of is_open should be False. """
+ instance = self.test_instance
+ self.failUnlessEqual(False, instance.is_open)
+
+ def test_write_fails(self):
+ """ Writing to is_open should fail. """
+ instance = self.test_instance
+ self.failUnlessRaises(
+ AttributeError,
+ setattr, instance, 'is_open', object())
+
+
+class DaemonContext_open_TestCase(scaffold.TestCase):
+ """ Test cases for DaemonContext.open method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ setup_daemon_context_fixtures(self)
+ self.mock_tracker.clear()
+
+ self.test_instance._is_open = False
+
+ scaffold.mock(
+ "daemon.daemon.detach_process_context",
+ tracker=self.mock_tracker)
+ scaffold.mock(
+ "daemon.daemon.change_working_directory",
+ tracker=self.mock_tracker)
+ scaffold.mock(
+ "daemon.daemon.change_root_directory",
+ tracker=self.mock_tracker)
+ scaffold.mock(
+ "daemon.daemon.change_file_creation_mask",
+ tracker=self.mock_tracker)
+ scaffold.mock(
+ "daemon.daemon.change_process_owner",
+ tracker=self.mock_tracker)
+ scaffold.mock(
+ "daemon.daemon.prevent_core_dump",
+ tracker=self.mock_tracker)
+ scaffold.mock(
+ "daemon.daemon.close_all_open_files",
+ tracker=self.mock_tracker)
+ scaffold.mock(
+ "daemon.daemon.redirect_stream",
+ tracker=self.mock_tracker)
+ scaffold.mock(
+ "daemon.daemon.set_signal_handlers",
+ tracker=self.mock_tracker)
+ scaffold.mock(
+ "daemon.daemon.register_atexit_function",
+ tracker=self.mock_tracker)
+
+ self.test_files_preserve_fds = object()
+ scaffold.mock(
+ "daemon.daemon.DaemonContext._get_exclude_file_descriptors",
+ returns=self.test_files_preserve_fds,
+ tracker=self.mock_tracker)
+
+ self.test_signal_handler_map = object()
+ scaffold.mock(
+ "daemon.daemon.DaemonContext._make_signal_handler_map",
+ returns=self.test_signal_handler_map,
+ tracker=self.mock_tracker)
+
+ scaffold.mock(
+ "sys.stdin",
+ tracker=self.mock_tracker)
+ scaffold.mock(
+ "sys.stdout",
+ tracker=self.mock_tracker)
+ scaffold.mock(
+ "sys.stderr",
+ tracker=self.mock_tracker)
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_performs_steps_in_expected_sequence(self):
+ """ Should perform daemonisation steps in expected sequence. """
+ instance = self.test_instance
+ instance.chroot_directory = object()
+ instance.detach_process = True
+ instance.pidfile = self.mock_pidlockfile
+ expect_mock_output = """\
+ Called daemon.daemon.change_root_directory(...)
+ Called daemon.daemon.prevent_core_dump()
+ Called daemon.daemon.change_file_creation_mask(...)
+ Called daemon.daemon.change_working_directory(...)
+ Called daemon.daemon.change_process_owner(...)
+ Called daemon.daemon.detach_process_context()
+ Called daemon.daemon.DaemonContext._make_signal_handler_map()
+ Called daemon.daemon.set_signal_handlers(...)
+ Called daemon.daemon.DaemonContext._get_exclude_file_descriptors()
+ Called daemon.daemon.close_all_open_files(...)
+ Called daemon.daemon.redirect_stream(...)
+ Called daemon.daemon.redirect_stream(...)
+ Called daemon.daemon.redirect_stream(...)
+ Called pidlockfile.PIDLockFile.__enter__()
+ Called daemon.daemon.register_atexit_function(...)
+ """ % vars()
+ self.mock_tracker.clear()
+ instance.open()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_returns_immediately_if_is_open(self):
+ """ Should return immediately if is_open property is true. """
+ instance = self.test_instance
+ instance._is_open = True
+ expect_mock_output = """\
+ """
+ self.mock_tracker.clear()
+ instance.open()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_changes_root_directory_to_chroot_directory(self):
+ """ Should change root directory to `chroot_directory` option. """
+ instance = self.test_instance
+ chroot_directory = object()
+ instance.chroot_directory = chroot_directory
+ expect_mock_output = """\
+ Called daemon.daemon.change_root_directory(
+ %(chroot_directory)r)
+ ...
+ """ % vars()
+ instance.open()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_omits_chroot_if_no_chroot_directory(self):
+ """ Should omit changing root directory if no `chroot_directory`. """
+ instance = self.test_instance
+ instance.chroot_directory = None
+ unwanted_output = """\
+ ...Called daemon.daemon.change_root_directory(...)..."""
+ instance.open()
+ self.failIfMockCheckerMatch(unwanted_output)
+
+ def test_prevents_core_dump(self):
+ """ Should request prevention of core dumps. """
+ instance = self.test_instance
+ expect_mock_output = """\
+ Called daemon.daemon.prevent_core_dump()
+ ...
+ """ % vars()
+ instance.open()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_omits_prevent_core_dump_if_prevent_core_false(self):
+ """ Should omit preventing core dumps if `prevent_core` is false. """
+ instance = self.test_instance
+ instance.prevent_core = False
+ unwanted_output = """\
+ ...Called daemon.daemon.prevent_core_dump()..."""
+ instance.open()
+ self.failIfMockCheckerMatch(unwanted_output)
+
+ def test_closes_open_files(self):
+ """ Should close all open files, excluding `files_preserve`. """
+ instance = self.test_instance
+ expect_exclude = self.test_files_preserve_fds
+ expect_mock_output = """\
+ ...
+ Called daemon.daemon.close_all_open_files(
+ exclude=%(expect_exclude)r)
+ ...
+ """ % vars()
+ instance.open()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_changes_directory_to_working_directory(self):
+ """ Should change current directory to `working_directory` option. """
+ instance = self.test_instance
+ working_directory = object()
+ instance.working_directory = working_directory
+ expect_mock_output = """\
+ ...
+ Called daemon.daemon.change_working_directory(
+ %(working_directory)r)
+ ...
+ """ % vars()
+ instance.open()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_changes_creation_mask_to_umask(self):
+ """ Should change file creation mask to `umask` option. """
+ instance = self.test_instance
+ umask = object()
+ instance.umask = umask
+ expect_mock_output = """\
+ ...
+ Called daemon.daemon.change_file_creation_mask(%(umask)r)
+ ...
+ """ % vars()
+ instance.open()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_changes_owner_to_specified_uid_and_gid(self):
+ """ Should change process UID and GID to `uid` and `gid` options. """
+ instance = self.test_instance
+ uid = object()
+ gid = object()
+ instance.uid = uid
+ instance.gid = gid
+ expect_mock_output = """\
+ ...
+ Called daemon.daemon.change_process_owner(%(uid)r, %(gid)r)
+ ...
+ """ % vars()
+ instance.open()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_detaches_process_context(self):
+ """ Should request detach of process context. """
+ instance = self.test_instance
+ expect_mock_output = """\
+ ...
+ Called daemon.daemon.detach_process_context()
+ ...
+ """ % vars()
+ instance.open()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_omits_process_detach_if_not_required(self):
+ """ Should omit detach of process context if not required. """
+ instance = self.test_instance
+ instance.detach_process = False
+ unwanted_output = """\
+ ...Called daemon.daemon.detach_process_context(...)..."""
+ instance.open()
+ self.failIfMockCheckerMatch(unwanted_output)
+
+ def test_sets_signal_handlers_from_signal_map(self):
+ """ Should set signal handlers according to `signal_map`. """
+ instance = self.test_instance
+ instance.signal_map = object()
+ expect_signal_handler_map = self.test_signal_handler_map
+ expect_mock_output = """\
+ ...
+ Called daemon.daemon.set_signal_handlers(
+ %(expect_signal_handler_map)r)
+ ...
+ """ % vars()
+ instance.open()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_redirects_standard_streams(self):
+ """ Should request redirection of standard stream files. """
+ instance = self.test_instance
+ (system_stdin, system_stdout, system_stderr) = (
+ sys.stdin, sys.stdout, sys.stderr)
+ (target_stdin, target_stdout, target_stderr) = (
+ self.stream_files_by_name[name]
+ for name in ['stdin', 'stdout', 'stderr'])
+ expect_mock_output = """\
+ ...
+ Called daemon.daemon.redirect_stream(
+ %(system_stdin)r, %(target_stdin)r)
+ Called daemon.daemon.redirect_stream(
+ %(system_stdout)r, %(target_stdout)r)
+ Called daemon.daemon.redirect_stream(
+ %(system_stderr)r, %(target_stderr)r)
+ ...
+ """ % vars()
+ instance.open()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_enters_pidfile_context(self):
+ """ Should enter the PID file context manager. """
+ instance = self.test_instance
+ instance.pidfile = self.mock_pidlockfile
+ expect_mock_output = """\
+ ...
+ Called pidlockfile.PIDLockFile.__enter__()
+ ...
+ """
+ instance.open()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_sets_is_open_true(self):
+ """ Should set the `is_open` property to True. """
+ instance = self.test_instance
+ instance.open()
+ self.failUnlessEqual(True, instance.is_open)
+
+ def test_registers_close_method_for_atexit(self):
+ """ Should register the `close` method for atexit processing. """
+ instance = self.test_instance
+ close_method = instance.close
+ expect_mock_output = """\
+ ...
+ Called daemon.daemon.register_atexit_function(%(close_method)r)
+ """ % vars()
+ instance.open()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+
+class DaemonContext_close_TestCase(scaffold.TestCase):
+ """ Test cases for DaemonContext.close method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ setup_daemon_context_fixtures(self)
+ self.mock_tracker.clear()
+
+ self.test_instance._is_open = True
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_returns_immediately_if_not_is_open(self):
+ """ Should return immediately if is_open property is false. """
+ instance = self.test_instance
+ instance._is_open = False
+ instance.pidfile = object()
+ expect_mock_output = """\
+ """
+ self.mock_tracker.clear()
+ instance.close()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_exits_pidfile_context(self):
+ """ Should exit the PID file context manager. """
+ instance = self.test_instance
+ instance.pidfile = self.mock_pidlockfile
+ expect_mock_output = """\
+ Called pidlockfile.PIDLockFile.__exit__(None, None, None)
+ """
+ instance.close()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_returns_none(self):
+ """ Should return None. """
+ instance = self.test_instance
+ expect_result = None
+ result = instance.close()
+ self.failUnlessIs(expect_result, result)
+
+ def test_sets_is_open_false(self):
+ """ Should set the `is_open` property to False. """
+ instance = self.test_instance
+ instance.close()
+ self.failUnlessEqual(False, instance.is_open)
+
+
+class DaemonContext_context_manager_enter_TestCase(scaffold.TestCase):
+ """ Test cases for DaemonContext.__enter__ method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ setup_daemon_context_fixtures(self)
+ self.mock_tracker.clear()
+
+ scaffold.mock(
+ "daemon.daemon.DaemonContext.open",
+ tracker=self.mock_tracker)
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_opens_daemon_context(self):
+ """ Should open the DaemonContext. """
+ instance = self.test_instance
+ expect_mock_output = """\
+ Called daemon.daemon.DaemonContext.open()
+ """
+ instance.__enter__()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_returns_self_instance(self):
+ """ Should return DaemonContext instance. """
+ instance = self.test_instance
+ expect_result = instance
+ result = instance.__enter__()
+ self.failUnlessIs(expect_result, result)
+
+
+class DaemonContext_context_manager_exit_TestCase(scaffold.TestCase):
+ """ Test cases for DaemonContext.__exit__ method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ setup_daemon_context_fixtures(self)
+ self.mock_tracker.clear()
+
+ self.test_args = dict(
+ exc_type = object(),
+ exc_value = object(),
+ traceback = object(),
+ )
+
+ scaffold.mock(
+ "daemon.daemon.DaemonContext.close",
+ tracker=self.mock_tracker)
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_closes_daemon_context(self):
+ """ Should close the DaemonContext. """
+ instance = self.test_instance
+ args = self.test_args
+ expect_mock_output = """\
+ Called daemon.daemon.DaemonContext.close()
+ """
+ instance.__exit__(**args)
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_returns_none(self):
+ """ Should return None, indicating exception was not handled. """
+ instance = self.test_instance
+ args = self.test_args
+ expect_result = None
+ result = instance.__exit__(**args)
+ self.failUnlessIs(expect_result, result)
+
+
+class DaemonContext_terminate_TestCase(scaffold.TestCase):
+ """ Test cases for DaemonContext.terminate method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ setup_daemon_context_fixtures(self)
+
+ self.test_signal = signal.SIGTERM
+ self.test_frame = None
+ self.test_args = (self.test_signal, self.test_frame)
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_raises_system_exit(self):
+ """ Should raise SystemExit. """
+ instance = self.test_instance
+ args = self.test_args
+ expect_exception = SystemExit
+ self.failUnlessRaises(
+ expect_exception,
+ instance.terminate, *args)
+
+ def test_exception_message_contains_signal_number(self):
+ """ Should raise exception with a message containing signal number. """
+ instance = self.test_instance
+ args = self.test_args
+ signal_number = self.test_signal
+ expect_exception = SystemExit
+ try:
+ instance.terminate(*args)
+ except expect_exception, exc:
+ pass
+ self.failUnlessIn(str(exc), str(signal_number))
+
+
+class DaemonContext_get_exclude_file_descriptors_TestCase(scaffold.TestCase):
+ """ Test cases for DaemonContext._get_exclude_file_descriptors function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ setup_daemon_context_fixtures(self)
+
+ self.test_files = {
+ 2: FakeFileDescriptorStringIO(),
+ 5: 5,
+ 11: FakeFileDescriptorStringIO(),
+ 17: None,
+ 23: FakeFileDescriptorStringIO(),
+ 37: 37,
+ 42: FakeFileDescriptorStringIO(),
+ }
+ for (fileno, item) in self.test_files.items():
+ if hasattr(item, '_fileno'):
+ item._fileno = fileno
+ self.test_file_descriptors = set(
+ fd for (fd, item) in self.test_files.items()
+ if item is not None)
+ self.test_file_descriptors.update(
+ self.stream_files_by_name[name].fileno()
+ for name in ['stdin', 'stdout', 'stderr']
+ )
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_returns_expected_file_descriptors(self):
+ """ Should return expected set of file descriptors. """
+ instance = self.test_instance
+ instance.files_preserve = self.test_files.values()
+ expect_result = self.test_file_descriptors
+ result = instance._get_exclude_file_descriptors()
+ self.failUnlessEqual(expect_result, result)
+
+ def test_returns_stream_redirects_if_no_files_preserve(self):
+ """ Should return only stream redirects if no files_preserve. """
+ instance = self.test_instance
+ instance.files_preserve = None
+ expect_result = set(
+ stream.fileno()
+ for stream in self.stream_files_by_name.values())
+ result = instance._get_exclude_file_descriptors()
+ self.failUnlessEqual(expect_result, result)
+
+ def test_returns_empty_set_if_no_files(self):
+ """ Should return empty set if no file options. """
+ instance = self.test_instance
+ for name in ['files_preserve', 'stdin', 'stdout', 'stderr']:
+ setattr(instance, name, None)
+ expect_result = set()
+ result = instance._get_exclude_file_descriptors()
+ self.failUnlessEqual(expect_result, result)
+
+ def test_return_set_omits_streams_without_file_descriptors(self):
+ """ Should omit any stream without a file descriptor. """
+ instance = self.test_instance
+ instance.files_preserve = self.test_files.values()
+ stream_files = self.stream_files_by_name
+ stream_names = stream_files.keys()
+ expect_result = self.test_file_descriptors.copy()
+ for (pseudo_stream_name, pseudo_stream) in stream_files.items():
+ setattr(instance, pseudo_stream_name, StringIO())
+ stream_fd = pseudo_stream.fileno()
+ expect_result.discard(stream_fd)
+ result = instance._get_exclude_file_descriptors()
+ self.failUnlessEqual(expect_result, result)
+
+
+class DaemonContext_make_signal_handler_TestCase(scaffold.TestCase):
+ """ Test cases for DaemonContext._make_signal_handler function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ setup_daemon_context_fixtures(self)
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_returns_ignore_for_none(self):
+ """ Should return SIG_IGN when None handler specified. """
+ instance = self.test_instance
+ target = None
+ expect_result = signal.SIG_IGN
+ result = instance._make_signal_handler(target)
+ self.failUnlessEqual(expect_result, result)
+
+ def test_returns_method_for_name(self):
+ """ Should return method of DaemonContext when name specified. """
+ instance = self.test_instance
+ target = 'terminate'
+ expect_result = instance.terminate
+ result = instance._make_signal_handler(target)
+ self.failUnlessEqual(expect_result, result)
+
+ def test_raises_error_for_unknown_name(self):
+ """ Should raise AttributeError for unknown method name. """
+ instance = self.test_instance
+ target = 'b0gUs'
+ expect_error = AttributeError
+ self.failUnlessRaises(
+ expect_error,
+ instance._make_signal_handler, target)
+
+ def test_returns_object_for_object(self):
+ """ Should return same object for any other object. """
+ instance = self.test_instance
+ target = object()
+ expect_result = target
+ result = instance._make_signal_handler(target)
+ self.failUnlessEqual(expect_result, result)
+
+
+class DaemonContext_make_signal_handler_map_TestCase(scaffold.TestCase):
+ """ Test cases for DaemonContext._make_signal_handler_map function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ setup_daemon_context_fixtures(self)
+
+ self.test_instance.signal_map = {
+ object(): object(),
+ object(): object(),
+ object(): object(),
+ }
+
+ self.test_signal_handlers = dict(
+ (key, object())
+ for key in self.test_instance.signal_map.values())
+ self.test_signal_handler_map = dict(
+ (key, self.test_signal_handlers[target])
+ for (key, target) in self.test_instance.signal_map.items())
+
+ def mock_make_signal_handler(target):
+ return self.test_signal_handlers[target]
+ scaffold.mock(
+ "daemon.daemon.DaemonContext._make_signal_handler",
+ returns_func=mock_make_signal_handler,
+ tracker=self.mock_tracker)
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_returns_constructed_signal_handler_items(self):
+ """ Should return items as constructed via make_signal_handler. """
+ instance = self.test_instance
+ expect_result = self.test_signal_handler_map
+ result = instance._make_signal_handler_map()
+ self.failUnlessEqual(expect_result, result)
+
+
+class change_working_directory_TestCase(scaffold.TestCase):
+ """ Test cases for change_working_directory function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ self.mock_tracker = scaffold.MockTracker()
+
+ scaffold.mock(
+ "os.chdir",
+ tracker=self.mock_tracker)
+
+ self.test_directory = object()
+ self.test_args = dict(
+ directory=self.test_directory,
+ )
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_changes_working_directory_to_specified_directory(self):
+ """ Should change working directory to specified directory. """
+ args = self.test_args
+ directory = self.test_directory
+ expect_mock_output = """\
+ Called os.chdir(%(directory)r)
+ """ % vars()
+ daemon.daemon.change_working_directory(**args)
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_raises_daemon_error_on_os_error(self):
+        """ Should raise a DaemonError on receiving an OSError. """
+ args = self.test_args
+ test_error = OSError(errno.ENOENT, "No such directory")
+ os.chdir.mock_raises = test_error
+ expect_error = daemon.daemon.DaemonOSEnvironmentError
+ self.failUnlessRaises(
+ expect_error,
+ daemon.daemon.change_working_directory, **args)
+
+ def test_error_message_contains_original_error_message(self):
+ """ Should raise a DaemonError with original message. """
+ args = self.test_args
+ test_error = OSError(errno.ENOENT, "No such directory")
+ os.chdir.mock_raises = test_error
+ expect_error = daemon.daemon.DaemonOSEnvironmentError
+ try:
+ daemon.daemon.change_working_directory(**args)
+ except expect_error, exc:
+ pass
+ self.failUnlessIn(str(exc), str(test_error))
+
+
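+# The change_working_directory tests above, and the chroot, umask, and owner
+# tests below, all exercise the same pattern: perform one os call and turn any
+# OSError into daemon.daemon.DaemonOSEnvironmentError, keeping the original
+# message. A minimal sketch of that pattern, inferred from these expectations
+# rather than copied from daemon/daemon.py (the _sketch_ prefix marks it as
+# illustrative only):
+
+def _sketch_change_working_directory(directory):
+    """ Change the working directory of this process (illustrative only). """
+    try:
+        os.chdir(directory)
+    except OSError, exc:
+        error = daemon.daemon.DaemonOSEnvironmentError(
+            "Unable to change working directory (%(exc)s)" % vars())
+        raise error
+
+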
+class change_root_directory_TestCase(scaffold.TestCase):
+ """ Test cases for change_root_directory function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ self.mock_tracker = scaffold.MockTracker()
+
+ scaffold.mock(
+ "os.chdir",
+ tracker=self.mock_tracker)
+ scaffold.mock(
+ "os.chroot",
+ tracker=self.mock_tracker)
+
+ self.test_directory = object()
+ self.test_args = dict(
+ directory=self.test_directory,
+ )
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_changes_working_directory_to_specified_directory(self):
+ """ Should change working directory to specified directory. """
+ args = self.test_args
+ directory = self.test_directory
+ expect_mock_output = """\
+ Called os.chdir(%(directory)r)
+ ...
+ """ % vars()
+ daemon.daemon.change_root_directory(**args)
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_changes_root_directory_to_specified_directory(self):
+ """ Should change root directory to specified directory. """
+ args = self.test_args
+ directory = self.test_directory
+ expect_mock_output = """\
+ ...
+ Called os.chroot(%(directory)r)
+ """ % vars()
+ daemon.daemon.change_root_directory(**args)
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_raises_daemon_error_on_os_error_from_chdir(self):
+ """ Should raise a DaemonError on receiving an OSError from chdir. """
+ args = self.test_args
+ test_error = OSError(errno.ENOENT, "No such directory")
+ os.chdir.mock_raises = test_error
+ expect_error = daemon.daemon.DaemonOSEnvironmentError
+ self.failUnlessRaises(
+ expect_error,
+ daemon.daemon.change_root_directory, **args)
+
+ def test_raises_daemon_error_on_os_error_from_chroot(self):
+ """ Should raise a DaemonError on receiving an OSError from chroot. """
+ args = self.test_args
+ test_error = OSError(errno.EPERM, "No chroot for you!")
+ os.chroot.mock_raises = test_error
+ expect_error = daemon.daemon.DaemonOSEnvironmentError
+ self.failUnlessRaises(
+ expect_error,
+ daemon.daemon.change_root_directory, **args)
+
+ def test_error_message_contains_original_error_message(self):
+ """ Should raise a DaemonError with original message. """
+ args = self.test_args
+ test_error = OSError(errno.ENOENT, "No such directory")
+ os.chdir.mock_raises = test_error
+ expect_error = daemon.daemon.DaemonOSEnvironmentError
+ try:
+ daemon.daemon.change_root_directory(**args)
+ except expect_error, exc:
+ pass
+ self.failUnlessIn(str(exc), str(test_error))
+
+
+class change_file_creation_mask_TestCase(scaffold.TestCase):
+ """ Test cases for change_file_creation_mask function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ self.mock_tracker = scaffold.MockTracker()
+
+ scaffold.mock(
+ "os.umask",
+ tracker=self.mock_tracker)
+
+ self.test_mask = object()
+ self.test_args = dict(
+ mask=self.test_mask,
+ )
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_changes_umask_to_specified_mask(self):
+ """ Should change working directory to specified directory. """
+ args = self.test_args
+ mask = self.test_mask
+ expect_mock_output = """\
+ Called os.umask(%(mask)r)
+ """ % vars()
+ daemon.daemon.change_file_creation_mask(**args)
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_raises_daemon_error_on_os_error_from_umask(self):
+ """ Should raise a DaemonError on receiving an OSError from umask. """
+ args = self.test_args
+ test_error = OSError(errno.EINVAL, "Whatchoo talkin' 'bout?")
+ os.umask.mock_raises = test_error
+ expect_error = daemon.daemon.DaemonOSEnvironmentError
+ self.failUnlessRaises(
+ expect_error,
+ daemon.daemon.change_file_creation_mask, **args)
+
+ def test_error_message_contains_original_error_message(self):
+ """ Should raise a DaemonError with original message. """
+ args = self.test_args
+ test_error = OSError(errno.ENOENT, "No such directory")
+ os.umask.mock_raises = test_error
+ expect_error = daemon.daemon.DaemonOSEnvironmentError
+ try:
+ daemon.daemon.change_file_creation_mask(**args)
+ except expect_error, exc:
+ pass
+ self.failUnlessIn(str(exc), str(test_error))
+
+
+class change_process_owner_TestCase(scaffold.TestCase):
+ """ Test cases for change_process_owner function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ self.mock_tracker = scaffold.MockTracker()
+
+ scaffold.mock(
+ "os.setuid",
+ tracker=self.mock_tracker)
+ scaffold.mock(
+ "os.setgid",
+ tracker=self.mock_tracker)
+
+ self.test_uid = object()
+ self.test_gid = object()
+ self.test_args = dict(
+ uid=self.test_uid,
+ gid=self.test_gid,
+ )
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_changes_gid_and_uid_in_order(self):
+ """ Should change process GID and UID in correct order.
+
+ Since the process requires appropriate privilege to use
+ either of `setuid` or `setgid`, changing the UID must be
+ done last.
+
+ """
+ args = self.test_args
+ expect_mock_output = """\
+ Called os.setgid(...)
+ Called os.setuid(...)
+ """ % vars()
+ daemon.daemon.change_process_owner(**args)
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_changes_group_id_to_gid(self):
+ """ Should change process GID to specified value. """
+ args = self.test_args
+ gid = self.test_gid
+ expect_mock_output = """\
+ Called os.setgid(%(gid)r)
+ ...
+ """ % vars()
+ daemon.daemon.change_process_owner(**args)
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_changes_user_id_to_uid(self):
+ """ Should change process UID to specified value. """
+ args = self.test_args
+ uid = self.test_uid
+ expect_mock_output = """\
+ ...
+ Called os.setuid(%(uid)r)
+ """ % vars()
+ daemon.daemon.change_process_owner(**args)
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_raises_daemon_error_on_os_error_from_setgid(self):
+ """ Should raise a DaemonError on receiving an OSError from setgid. """
+ args = self.test_args
+ test_error = OSError(errno.EPERM, "No switching for you!")
+ os.setgid.mock_raises = test_error
+ expect_error = daemon.daemon.DaemonOSEnvironmentError
+ self.failUnlessRaises(
+ expect_error,
+ daemon.daemon.change_process_owner, **args)
+
+ def test_raises_daemon_error_on_os_error_from_setuid(self):
+ """ Should raise a DaemonError on receiving an OSError from setuid. """
+ args = self.test_args
+ test_error = OSError(errno.EPERM, "No switching for you!")
+ os.setuid.mock_raises = test_error
+ expect_error = daemon.daemon.DaemonOSEnvironmentError
+ self.failUnlessRaises(
+ expect_error,
+ daemon.daemon.change_process_owner, **args)
+
+ def test_error_message_contains_original_error_message(self):
+ """ Should raise a DaemonError with original message. """
+ args = self.test_args
+ test_error = OSError(errno.EINVAL, "Whatchoo talkin' 'bout?")
+ os.setuid.mock_raises = test_error
+ expect_error = daemon.daemon.DaemonOSEnvironmentError
+ try:
+ daemon.daemon.change_process_owner(**args)
+ except expect_error, exc:
+ pass
+ self.failUnlessIn(str(exc), str(test_error))
+
+
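+# As the docstring of test_changes_gid_and_uid_in_order explains, the group ID
+# must be dropped before the user ID: once the process has relinquished its
+# privileged UID it may no longer be permitted to call setgid. A minimal
+# sketch of the expected behaviour, inferred from these tests rather than
+# copied from daemon/daemon.py:
+
+def _sketch_change_process_owner(uid, gid):
+    """ Change the owning UID and GID of this process (illustrative only). """
+    try:
+        os.setgid(gid)
+        os.setuid(uid)
+    except OSError, exc:
+        error = daemon.daemon.DaemonOSEnvironmentError(
+            "Unable to change process owner (%(exc)s)" % vars())
+        raise error
+
+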
+class prevent_core_dump_TestCase(scaffold.TestCase):
+ """ Test cases for prevent_core_dump function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ self.mock_tracker = scaffold.MockTracker()
+
+ self.RLIMIT_CORE = object()
+ scaffold.mock(
+ "resource.RLIMIT_CORE", mock_obj=self.RLIMIT_CORE,
+ tracker=self.mock_tracker)
+ scaffold.mock(
+ "resource.getrlimit", returns=None,
+ tracker=self.mock_tracker)
+ scaffold.mock(
+ "resource.setrlimit", returns=None,
+ tracker=self.mock_tracker)
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_sets_core_limit_to_zero(self):
+ """ Should set the RLIMIT_CORE resource to zero. """
+ expect_resource = self.RLIMIT_CORE
+ expect_limit = (0, 0)
+ expect_mock_output = """\
+ Called resource.getrlimit(
+ %(expect_resource)r)
+ Called resource.setrlimit(
+ %(expect_resource)r,
+ %(expect_limit)r)
+ """ % vars()
+ daemon.daemon.prevent_core_dump()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_raises_error_when_no_core_resource(self):
+ """ Should raise DaemonError if no RLIMIT_CORE resource. """
+ def mock_getrlimit(res):
+ if res == resource.RLIMIT_CORE:
+ raise ValueError("Bogus platform doesn't have RLIMIT_CORE")
+ else:
+ return None
+ resource.getrlimit.mock_returns_func = mock_getrlimit
+ expect_error = daemon.daemon.DaemonOSEnvironmentError
+ self.failUnlessRaises(
+ expect_error,
+ daemon.daemon.prevent_core_dump)
+
+
+class close_file_descriptor_if_open_TestCase(scaffold.TestCase):
+ """ Test cases for close_file_descriptor_if_open function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ self.mock_tracker = scaffold.MockTracker()
+
+ self.test_fd = 274
+
+ scaffold.mock(
+ "os.close",
+ tracker=self.mock_tracker)
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_requests_file_descriptor_close(self):
+ """ Should request close of file descriptor. """
+ fd = self.test_fd
+ expect_mock_output = """\
+ Called os.close(%(fd)r)
+ """ % vars()
+ daemon.daemon.close_file_descriptor_if_open(fd)
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_ignores_badfd_error_on_close(self):
+ """ Should ignore OSError EBADF when closing. """
+ fd = self.test_fd
+ test_error = OSError(errno.EBADF, "Bad file descriptor")
+ def os_close(fd):
+ raise test_error
+ os.close.mock_returns_func = os_close
+ expect_mock_output = """\
+ Called os.close(%(fd)r)
+ """ % vars()
+ daemon.daemon.close_file_descriptor_if_open(fd)
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_raises_error_if_error_on_close(self):
+ """ Should raise DaemonError if an OSError occurs when closing. """
+ fd = self.test_fd
+ test_error = OSError(object(), "Unexpected error")
+ def os_close(fd):
+ raise test_error
+ os.close.mock_returns_func = os_close
+ expect_error = daemon.daemon.DaemonOSEnvironmentError
+ self.failUnlessRaises(
+ expect_error,
+ daemon.daemon.close_file_descriptor_if_open, fd)
+
+
+class maxfd_TestCase(scaffold.TestCase):
+ """ Test cases for module MAXFD constant. """
+
+ def test_positive(self):
+ """ Should be a positive number. """
+ maxfd = daemon.daemon.MAXFD
+ self.failUnless(maxfd > 0)
+
+ def test_integer(self):
+ """ Should be an integer. """
+ maxfd = daemon.daemon.MAXFD
+ self.failUnlessEqual(int(maxfd), maxfd)
+
+ def test_reasonably_high(self):
+ """ Should be reasonably high for default open files limit.
+
+ If the system reports a limit of “infinity” on maximum
+ file descriptors, we still need a finite number in order
+ to close “all” of them. Ensure this is reasonably high
+ to catch most use cases.
+
+ """
+ expect_minimum = 2048
+ maxfd = daemon.daemon.MAXFD
+ self.failUnless(
+ expect_minimum <= maxfd,
+ msg="MAXFD should be at least %(expect_minimum)r (got %(maxfd)r)"
+ % vars())
+
+
+class get_maximum_file_descriptors_TestCase(scaffold.TestCase):
+ """ Test cases for get_maximum_file_descriptors function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ self.mock_tracker = scaffold.MockTracker()
+
+ self.RLIMIT_NOFILE = object()
+ self.RLIM_INFINITY = object()
+ self.test_rlimit_nofile = 2468
+
+ def mock_getrlimit(resource):
+ result = (object(), self.test_rlimit_nofile)
+ if resource != self.RLIMIT_NOFILE:
+ result = NotImplemented
+ return result
+
+ self.test_maxfd = object()
+ scaffold.mock(
+ "daemon.daemon.MAXFD", mock_obj=self.test_maxfd,
+ tracker=self.mock_tracker)
+
+ scaffold.mock(
+ "resource.RLIMIT_NOFILE", mock_obj=self.RLIMIT_NOFILE,
+ tracker=self.mock_tracker)
+ scaffold.mock(
+ "resource.RLIM_INFINITY", mock_obj=self.RLIM_INFINITY,
+ tracker=self.mock_tracker)
+ scaffold.mock(
+ "resource.getrlimit", returns_func=mock_getrlimit,
+ tracker=self.mock_tracker)
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_returns_system_hard_limit(self):
+ """ Should return process hard limit on number of files. """
+ expect_result = self.test_rlimit_nofile
+ result = daemon.daemon.get_maximum_file_descriptors()
+ self.failUnlessEqual(expect_result, result)
+
+ def test_returns_module_default_if_hard_limit_infinity(self):
+ """ Should return module MAXFD if hard limit is infinity. """
+ self.test_rlimit_nofile = self.RLIM_INFINITY
+ expect_result = self.test_maxfd
+ result = daemon.daemon.get_maximum_file_descriptors()
+ self.failUnlessEqual(expect_result, result)
+
+
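+# These tests pin down the contract: report the RLIMIT_NOFILE hard limit when
+# the platform supplies a finite one, and fall back to the module's MAXFD
+# constant when the hard limit is RLIM_INFINITY (see the maxfd_TestCase
+# reasoning above). A minimal sketch, inferred from the expectations rather
+# than copied from daemon/daemon.py:
+
+def _sketch_get_maximum_file_descriptors():
+    """ Get the maximum number of open file descriptors (illustrative only). """
+    (soft_limit, hard_limit) = resource.getrlimit(resource.RLIMIT_NOFILE)
+    result = hard_limit
+    if hard_limit == resource.RLIM_INFINITY:
+        result = daemon.daemon.MAXFD
+    return result
+
+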
+class close_all_open_files_TestCase(scaffold.TestCase):
+ """ Test cases for close_all_open_files function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ self.mock_tracker = scaffold.MockTracker()
+
+ self.RLIMIT_NOFILE = object()
+ self.RLIM_INFINITY = object()
+ self.test_rlimit_nofile = self.RLIM_INFINITY
+
+ def mock_getrlimit(resource):
+ result = (self.test_rlimit_nofile, object())
+ if resource != self.RLIMIT_NOFILE:
+ result = NotImplemented
+ return result
+
+ self.test_maxfd = 8
+ scaffold.mock(
+ "daemon.daemon.get_maximum_file_descriptors",
+ returns=self.test_maxfd,
+ tracker=self.mock_tracker)
+
+ scaffold.mock(
+ "resource.RLIMIT_NOFILE", mock_obj=self.RLIMIT_NOFILE,
+ tracker=self.mock_tracker)
+ scaffold.mock(
+ "resource.RLIM_INFINITY", mock_obj=self.RLIM_INFINITY,
+ tracker=self.mock_tracker)
+ scaffold.mock(
+ "resource.getrlimit", returns_func=mock_getrlimit,
+ tracker=self.mock_tracker)
+
+ scaffold.mock(
+ "daemon.daemon.close_file_descriptor_if_open",
+ tracker=self.mock_tracker)
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_requests_all_open_files_to_close(self):
+ """ Should request close of all open files. """
+ expect_file_descriptors = reversed(range(self.test_maxfd))
+ expect_mock_output = "...\n" + "".join(
+ "Called daemon.daemon.close_file_descriptor_if_open(%(fd)r)\n"
+ % vars()
+ for fd in expect_file_descriptors)
+ daemon.daemon.close_all_open_files()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_requests_all_but_excluded_files_to_close(self):
+ """ Should request close of all open files but those excluded. """
+ test_exclude = set([3, 7])
+ args = dict(
+ exclude = test_exclude,
+ )
+ expect_file_descriptors = (
+ fd for fd in reversed(range(self.test_maxfd))
+ if fd not in test_exclude)
+ expect_mock_output = "...\n" + "".join(
+ "Called daemon.daemon.close_file_descriptor_if_open(%(fd)r)\n"
+ % vars()
+ for fd in expect_file_descriptors)
+ daemon.daemon.close_all_open_files(**args)
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+
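+# The expectations above describe the closing strategy: walk every candidate
+# descriptor from the maximum downwards, skip any listed in `exclude`, and
+# hand the rest to close_file_descriptor_if_open. A minimal sketch, inferred
+# from these tests rather than copied from daemon/daemon.py:
+
+def _sketch_close_all_open_files(exclude=frozenset()):
+    """ Close all open file descriptors, except those excluded (illustrative). """
+    maxfd = daemon.daemon.get_maximum_file_descriptors()
+    for fd in reversed(range(maxfd)):
+        if fd not in exclude:
+            daemon.daemon.close_file_descriptor_if_open(fd)
+
+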
+class detach_process_context_TestCase(scaffold.TestCase):
+ """ Test cases for detach_process_context function. """
+
+ class FakeOSExit(SystemExit):
+ """ Fake exception raised for os._exit(). """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ self.mock_tracker = scaffold.MockTracker()
+
+ test_pids = [0, 0]
+ scaffold.mock(
+ "os.fork", returns_iter=test_pids,
+ tracker=self.mock_tracker)
+ scaffold.mock(
+ "os.setsid",
+ tracker=self.mock_tracker)
+
+ def raise_os_exit(status=None):
+ raise self.FakeOSExit(status)
+
+ scaffold.mock(
+ "os._exit", returns_func=raise_os_exit,
+ tracker=self.mock_tracker)
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_parent_exits(self):
+ """ Parent process should exit. """
+ parent_pid = 23
+ scaffold.mock("os.fork", returns_iter=[parent_pid],
+ tracker=self.mock_tracker)
+ expect_mock_output = """\
+ Called os.fork()
+ Called os._exit(0)
+ """
+ self.failUnlessRaises(
+ self.FakeOSExit,
+ daemon.daemon.detach_process_context)
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_first_fork_error_raises_error(self):
+ """ Error on first fork should raise DaemonProcessDetachError. """
+ fork_errno = 13
+ fork_strerror = "Bad stuff happened"
+ fork_error = OSError(fork_errno, fork_strerror)
+ test_pids_iter = iter([fork_error])
+
+ def mock_fork():
+ next = test_pids_iter.next()
+ if isinstance(next, Exception):
+ raise next
+ else:
+ return next
+
+ scaffold.mock("os.fork", returns_func=mock_fork,
+ tracker=self.mock_tracker)
+ expect_mock_output = """\
+ Called os.fork()
+ """
+ self.failUnlessRaises(
+ daemon.daemon.DaemonProcessDetachError,
+ daemon.daemon.detach_process_context)
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_child_starts_new_process_group(self):
+ """ Child should start new process group. """
+ expect_mock_output = """\
+ Called os.fork()
+ Called os.setsid()
+ ...
+ """
+ daemon.daemon.detach_process_context()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_child_forks_next_parent_exits(self):
+ """ Child should fork, then exit if parent. """
+ test_pids = [0, 42]
+ scaffold.mock("os.fork", returns_iter=test_pids,
+ tracker=self.mock_tracker)
+ expect_mock_output = """\
+ Called os.fork()
+ Called os.setsid()
+ Called os.fork()
+ Called os._exit(0)
+ """
+ self.failUnlessRaises(
+ self.FakeOSExit,
+ daemon.daemon.detach_process_context)
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_second_fork_error_raises_error(self):
+ """ Error on second fork should raise DaemonProcessDetachError. """
+ fork_errno = 17
+ fork_strerror = "Nasty stuff happened"
+ fork_error = OSError(fork_errno, fork_strerror)
+ test_pids_iter = iter([0, fork_error])
+
+ def mock_fork():
+ next = test_pids_iter.next()
+ if isinstance(next, Exception):
+ raise next
+ else:
+ return next
+
+ scaffold.mock("os.fork", returns_func=mock_fork,
+ tracker=self.mock_tracker)
+ expect_mock_output = """\
+ Called os.fork()
+ Called os.setsid()
+ Called os.fork()
+ """
+ self.failUnlessRaises(
+ daemon.daemon.DaemonProcessDetachError,
+ daemon.daemon.detach_process_context)
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_child_forks_next_child_continues(self):
+ """ Child should fork, then continue if child. """
+ expect_mock_output = """\
+ Called os.fork()
+ Called os.setsid()
+ Called os.fork()
+ """ % vars()
+ daemon.daemon.detach_process_context()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+
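+# Taken together, these tests describe the classic double-fork detach: fork
+# and let the parent exit, start a new session with setsid, fork again so the
+# session leader exits, and wrap any fork failure in DaemonProcessDetachError.
+# A minimal sketch of that flow, inferred from the expected mock output rather
+# than copied from daemon/daemon.py:
+
+def _sketch_detach_process_context():
+    """ Detach the process from parent and session (illustrative only). """
+
+    def fork_then_exit_parent(error_message):
+        try:
+            pid = os.fork()
+            if pid > 0:
+                os._exit(0)
+        except OSError, exc:
+            raise daemon.daemon.DaemonProcessDetachError(
+                "%(error_message)s: [%(exc)s]" % vars())
+
+    fork_then_exit_parent(error_message="Failed first fork")
+    os.setsid()
+    fork_then_exit_parent(error_message="Failed second fork")
+
+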
+class is_process_started_by_init_TestCase(scaffold.TestCase):
+ """ Test cases for is_process_started_by_init function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ self.mock_tracker = scaffold.MockTracker()
+
+ self.test_ppid = 765
+
+ scaffold.mock(
+ "os.getppid",
+ returns=self.test_ppid,
+ tracker=self.mock_tracker)
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_returns_false_by_default(self):
+ """ Should return False under normal circumstances. """
+ expect_result = False
+ result = daemon.daemon.is_process_started_by_init()
+ self.failUnlessIs(expect_result, result)
+
+ def test_returns_true_if_parent_process_is_init(self):
+ """ Should return True if parent process is `init`. """
+ init_pid = 1
+ os.getppid.mock_returns = init_pid
+ expect_result = True
+ result = daemon.daemon.is_process_started_by_init()
+ self.failUnlessIs(expect_result, result)
+
+
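+# The two cases above reduce to a single comparison: a process that has been
+# inherited by init has parent PID 1. A minimal sketch, inferred from these
+# tests rather than copied from daemon/daemon.py:
+
+def _sketch_is_process_started_by_init():
+    """ Determine if the current process was started by init (illustrative). """
+    init_pid = 1
+    return os.getppid() == init_pid
+
+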
+class is_socket_TestCase(scaffold.TestCase):
+ """ Test cases for is_socket function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ self.mock_tracker = scaffold.MockTracker()
+
+ def mock_getsockopt(level, optname, buflen=None):
+ result = object()
+ if optname is socket.SO_TYPE:
+ result = socket.SOCK_RAW
+ return result
+
+ self.mock_socket_getsockopt_func = mock_getsockopt
+
+ self.mock_socket_error = socket.error(
+ errno.ENOTSOCK,
+ "Socket operation on non-socket")
+
+ self.mock_socket = scaffold.Mock(
+ "socket.socket",
+ tracker=self.mock_tracker)
+ self.mock_socket.getsockopt.mock_raises = self.mock_socket_error
+
+ def mock_socket_fromfd(fd, family, type, proto=None):
+ return self.mock_socket
+
+ scaffold.mock(
+ "socket.fromfd",
+ returns_func=mock_socket_fromfd,
+ tracker=self.mock_tracker)
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_returns_false_by_default(self):
+ """ Should return False under normal circumstances. """
+ test_fd = 23
+ expect_result = False
+ result = daemon.daemon.is_socket(test_fd)
+ self.failUnlessIs(expect_result, result)
+
+ def test_returns_true_if_stdin_is_socket(self):
+ """ Should return True if `stdin` is a socket. """
+ test_fd = 23
+ getsockopt = self.mock_socket.getsockopt
+ getsockopt.mock_raises = None
+ getsockopt.mock_returns_func = self.mock_socket_getsockopt_func
+ expect_result = True
+ result = daemon.daemon.is_socket(test_fd)
+ self.failUnlessIs(expect_result, result)
+
+ def test_returns_true_if_stdin_socket_raises_error(self):
+ """ Should return True if `stdin` is a socket but the query raises an error. """
+ test_fd = 23
+ getsockopt = self.mock_socket.getsockopt
+ getsockopt.mock_raises = socket.error(
+ object(), "Weird socket stuff")
+ expect_result = True
+ result = daemon.daemon.is_socket(test_fd)
+ self.failUnlessIs(expect_result, result)
+
+
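+# The fixtures above capture the probe being tested: wrap the descriptor with
+# socket.fromfd and query SO_TYPE; an ENOTSOCK error means the descriptor is
+# not a socket, while success or any other socket error means it is. A minimal
+# sketch, inferred from these tests rather than copied from daemon/daemon.py:
+
+def _sketch_is_socket(fd):
+    """ Determine if the file descriptor is a socket (illustrative only). """
+    file_socket = socket.fromfd(fd, socket.AF_INET, socket.SOCK_RAW)
+    try:
+        file_socket.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE)
+    except socket.error, exc:
+        exc_errno = exc.args[0]
+        return exc_errno != errno.ENOTSOCK
+    return True
+
+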
+class is_process_started_by_superserver_TestCase(scaffold.TestCase):
+ """ Test cases for is_process_started_by_superserver function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ self.mock_tracker = scaffold.MockTracker()
+
+ def mock_is_socket(fd):
+ if sys.__stdin__.fileno() == fd:
+ result = self.mock_stdin_is_socket_func()
+ else:
+ result = False
+ return result
+
+ self.mock_stdin_is_socket_func = (lambda: False)
+
+ scaffold.mock(
+ "daemon.daemon.is_socket",
+ returns_func=mock_is_socket,
+ tracker=self.mock_tracker)
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_returns_false_by_default(self):
+ """ Should return False under normal circumstances. """
+ expect_result = False
+ result = daemon.daemon.is_process_started_by_superserver()
+ self.failUnlessIs(expect_result, result)
+
+ def test_returns_true_if_stdin_is_socket(self):
+ """ Should return True if `stdin` is a socket. """
+ self.mock_stdin_is_socket_func = (lambda: True)
+ expect_result = True
+ result = daemon.daemon.is_process_started_by_superserver()
+ self.failUnlessIs(expect_result, result)
+
+
+class is_detach_process_context_required_TestCase(scaffold.TestCase):
+ """ Test cases for is_detach_process_context_required function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ self.mock_tracker = scaffold.MockTracker()
+
+ scaffold.mock(
+ "daemon.daemon.is_process_started_by_init",
+ tracker=self.mock_tracker)
+ scaffold.mock(
+ "daemon.daemon.is_process_started_by_superserver",
+ tracker=self.mock_tracker)
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_returns_true_by_default(self):
+ """ Should return False under normal circumstances. """
+ expect_result = True
+ result = daemon.daemon.is_detach_process_context_required()
+ self.failUnlessIs(expect_result, result)
+
+ def test_returns_false_if_started_by_init(self):
+ """ Should return False if current process started by init. """
+ daemon.daemon.is_process_started_by_init.mock_returns = True
+ expect_result = False
+ result = daemon.daemon.is_detach_process_context_required()
+ self.failUnlessIs(expect_result, result)
+
+ def test_returns_false_if_started_by_superserver(self):
+ """ Should return False if current process started by superserver. """
+ daemon.daemon.is_process_started_by_superserver.mock_returns = True
+ expect_result = False
+ result = daemon.daemon.is_detach_process_context_required()
+ self.failUnlessIs(expect_result, result)
+
+
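+# The three cases above combine the two detectors: detaching is redundant only
+# when the process was already started by init or by an inetd-style
+# superserver. A minimal sketch, inferred from these tests rather than copied
+# from daemon/daemon.py:
+
+def _sketch_is_detach_process_context_required():
+    """ Determine whether a detach is required (illustrative only). """
+    return not (
+        daemon.daemon.is_process_started_by_init()
+        or daemon.daemon.is_process_started_by_superserver())
+
+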
+def setup_streams_fixtures(testcase):
+ """ Set up common test fixtures for standard streams. """
+ testcase.mock_tracker = scaffold.MockTracker()
+
+ testcase.stream_file_paths = dict(
+ stdin = tempfile.mktemp(),
+ stdout = tempfile.mktemp(),
+ stderr = tempfile.mktemp(),
+ )
+
+ testcase.stream_files_by_name = dict(
+ (name, FakeFileDescriptorStringIO())
+ for name in ['stdin', 'stdout', 'stderr']
+ )
+
+ testcase.stream_files_by_path = dict(
+ (testcase.stream_file_paths[name],
+ testcase.stream_files_by_name[name])
+ for name in ['stdin', 'stdout', 'stderr']
+ )
+
+ scaffold.mock(
+ "os.dup2",
+ tracker=testcase.mock_tracker)
+
+
+class redirect_stream_TestCase(scaffold.TestCase):
+ """ Test cases for redirect_stream function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ setup_streams_fixtures(self)
+
+ self.test_system_stream = FakeFileDescriptorStringIO()
+ self.test_target_stream = FakeFileDescriptorStringIO()
+ self.test_null_file = FakeFileDescriptorStringIO()
+
+ def mock_open(path, flag, mode=None):
+ if path == os.devnull:
+ result = self.test_null_file.fileno()
+ else:
+ raise OSError(errno.ENOENT, "No such file", path)
+ return result
+
+ scaffold.mock(
+ "os.open",
+ returns_func=mock_open,
+ tracker=self.mock_tracker)
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_duplicates_target_file_descriptor(self):
+ """ Should duplicate file descriptor from target to system stream. """
+ system_stream = self.test_system_stream
+ system_fileno = system_stream.fileno()
+ target_stream = self.test_target_stream
+ target_fileno = target_stream.fileno()
+ expect_mock_output = """\
+ Called os.dup2(%(target_fileno)r, %(system_fileno)r)
+ """ % vars()
+ daemon.daemon.redirect_stream(system_stream, target_stream)
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_duplicates_null_file_descriptor_by_default(self):
+ """ Should by default duplicate the null file to the system stream. """
+ system_stream = self.test_system_stream
+ system_fileno = system_stream.fileno()
+ target_stream = None
+ null_path = os.devnull
+ null_flag = os.O_RDWR
+ null_file = self.test_null_file
+ null_fileno = null_file.fileno()
+ expect_mock_output = """\
+ Called os.open(%(null_path)r, %(null_flag)r)
+ Called os.dup2(%(null_fileno)r, %(system_fileno)r)
+ """ % vars()
+ daemon.daemon.redirect_stream(system_stream, target_stream)
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+
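+# The two tests above fix the behaviour: with no target stream the null device
+# is opened read-write, and in either case the chosen descriptor is duplicated
+# onto the system stream's descriptor with os.dup2. A minimal sketch, inferred
+# from the expected mock output rather than copied from daemon/daemon.py:
+
+def _sketch_redirect_stream(system_stream, target_stream):
+    """ Redirect a system stream to the specified file (illustrative only). """
+    if target_stream is None:
+        target_fd = os.open(os.devnull, os.O_RDWR)
+    else:
+        target_fd = target_stream.fileno()
+    os.dup2(target_fd, system_stream.fileno())
+
+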
+class make_default_signal_map_TestCase(scaffold.TestCase):
+ """ Test cases for make_default_signal_map function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ self.mock_tracker = scaffold.MockTracker()
+
+ mock_signal_module = ModuleType('signal')
+ mock_signal_names = [
+ 'SIGHUP',
+ 'SIGCLD',
+ 'SIGSEGV',
+ 'SIGTSTP',
+ 'SIGTTIN',
+ 'SIGTTOU',
+ 'SIGTERM',
+ ]
+ for name in mock_signal_names:
+ setattr(mock_signal_module, name, object())
+
+ scaffold.mock(
+ "signal",
+ mock_obj=mock_signal_module,
+ tracker=self.mock_tracker)
+ scaffold.mock(
+ "daemon.daemon.signal",
+ mock_obj=mock_signal_module,
+ tracker=self.mock_tracker)
+
+ default_signal_map_by_name = {
+ 'SIGTSTP': None,
+ 'SIGTTIN': None,
+ 'SIGTTOU': None,
+ 'SIGTERM': 'terminate',
+ }
+
+ self.default_signal_map = dict(
+ (getattr(signal, name), target)
+ for (name, target) in default_signal_map_by_name.items())
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_returns_constructed_signal_map(self):
+ """ Should return map per default. """
+ expect_result = self.default_signal_map
+ result = daemon.daemon.make_default_signal_map()
+ self.failUnlessEqual(expect_result, result)
+
+ def test_returns_signal_map_with_only_ids_in_signal_module(self):
+ """ Should return map with only signals in the `signal` module.
+
+ The `signal` module is documented to only define those
+ signals which exist on the running system. Therefore the
+ default map should not contain any signals which are not
+ defined in the `signal` module.
+
+ """
+ del(self.default_signal_map[signal.SIGTTOU])
+ del(signal.SIGTTOU)
+ expect_result = self.default_signal_map
+ result = daemon.daemon.make_default_signal_map()
+ self.failUnlessEqual(expect_result, result)
+
+
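+# The second test above captures the important subtlety: the default map is
+# built from signal *names*, and any name the running platform's `signal`
+# module does not define is simply skipped. A minimal sketch, inferred from
+# these tests rather than copied from daemon/daemon.py:
+
+def _sketch_make_default_signal_map():
+    """ Make the default signal map for a daemon context (illustrative). """
+    name_map = {
+        'SIGTSTP': None,
+        'SIGTTIN': None,
+        'SIGTTOU': None,
+        'SIGTERM': 'terminate',
+        }
+    return dict(
+        (getattr(signal, name), target)
+        for (name, target) in name_map.items()
+        if hasattr(signal, name))
+
+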
+class set_signal_handlers_TestCase(scaffold.TestCase):
+ """ Test cases for set_signal_handlers function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ self.mock_tracker = scaffold.MockTracker()
+
+ scaffold.mock(
+ "signal.signal",
+ tracker=self.mock_tracker)
+
+ self.signal_handler_map = {
+ signal.SIGQUIT: object(),
+ signal.SIGSEGV: object(),
+ signal.SIGINT: object(),
+ }
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_sets_signal_handler_for_each_item(self):
+ """ Should set signal handler for each item in map. """
+ signal_handler_map = self.signal_handler_map
+ expect_mock_output = "".join(
+ "Called signal.signal(%(signal_number)r, %(handler)r)\n"
+ % vars()
+ for (signal_number, handler) in signal_handler_map.items())
+ daemon.daemon.set_signal_handlers(signal_handler_map)
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+
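+# The expectation above is a straight iteration over the handler map. A
+# minimal sketch, inferred from the test rather than copied from
+# daemon/daemon.py:
+
+def _sketch_set_signal_handlers(signal_handler_map):
+    """ Set the signal handlers as specified (illustrative only). """
+    for (signal_number, handler) in signal_handler_map.items():
+        signal.signal(signal_number, handler)
+
+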
+class register_atexit_function_TestCase(scaffold.TestCase):
+ """ Test cases for register_atexit_function function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ self.mock_tracker = scaffold.MockTracker()
+
+ scaffold.mock(
+ "atexit.register",
+ tracker=self.mock_tracker)
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_registers_function_for_atexit_processing(self):
+ """ Should register specified function for atexit processing. """
+ func = object()
+ expect_mock_output = """\
+ Called atexit.register(%(func)r)
+ """ % vars()
+ daemon.daemon.register_atexit_function(func)
+ self.failUnlessMockCheckerMatch(expect_mock_output)
diff --git a/test/test_pidlockfile.py b/test/test_pidlockfile.py
new file mode 100644
index 0000000..c8f952e
--- /dev/null
+++ b/test/test_pidlockfile.py
@@ -0,0 +1,791 @@
+# -*- coding: utf-8 -*-
+#
+# test/test_pidlockfile.py
+# Part of python-daemon, an implementation of PEP 3143.
+#
+# Copyright © 2008–2010 Ben Finney <ben+python@benfinney.id.au>
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Python Software Foundation License, version 2 or
+# later as published by the Python Software Foundation.
+# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
+
+""" Unit test for pidlockfile module.
+ """
+
+import __builtin__
+import os
+from StringIO import StringIO
+import itertools
+import tempfile
+import errno
+
+import lockfile
+
+import scaffold
+from daemon import pidlockfile
+
+
+class FakeFileDescriptorStringIO(StringIO, object):
+ """ A StringIO class that fakes a file descriptor. """
+
+ _fileno_generator = itertools.count()
+
+ def __init__(self, *args, **kwargs):
+ self._fileno = self._fileno_generator.next()
+ super_instance = super(FakeFileDescriptorStringIO, self)
+ super_instance.__init__(*args, **kwargs)
+
+ def fileno(self):
+ return self._fileno
+
+
+class Exception_TestCase(scaffold.Exception_TestCase):
+ """ Test cases for module exception classes. """
+
+ def __init__(self, *args, **kwargs):
+ """ Set up a new instance. """
+ super(Exception_TestCase, self).__init__(*args, **kwargs)
+
+ self.valid_exceptions = {
+ pidlockfile.PIDFileError: dict(
+ min_args = 1,
+ types = (Exception,),
+ ),
+ pidlockfile.PIDFileParseError: dict(
+ min_args = 2,
+ types = (pidlockfile.PIDFileError, ValueError),
+ ),
+ }
+
+
+def make_pidlockfile_scenarios():
+ """ Make a collection of scenarios for testing PIDLockFile instances. """
+
+ mock_current_pid = 235
+ mock_other_pid = 8642
+ mock_pidfile_path = tempfile.mktemp()
+
+ mock_pidfile_empty = FakeFileDescriptorStringIO()
+ mock_pidfile_current_pid = FakeFileDescriptorStringIO(
+ "%(mock_current_pid)d\n" % vars())
+ mock_pidfile_other_pid = FakeFileDescriptorStringIO(
+ "%(mock_other_pid)d\n" % vars())
+ mock_pidfile_bogus = FakeFileDescriptorStringIO(
+ "b0gUs")
+
+ scenarios = {
+ 'simple': {},
+ 'not-exist': {
+ 'open_func_name': 'mock_open_nonexist',
+ 'os_open_func_name': 'mock_os_open_nonexist',
+ },
+ 'not-exist-write-denied': {
+ 'open_func_name': 'mock_open_nonexist',
+ 'os_open_func_name': 'mock_os_open_nonexist',
+ },
+ 'not-exist-write-busy': {
+ 'open_func_name': 'mock_open_nonexist',
+ 'os_open_func_name': 'mock_os_open_nonexist',
+ },
+ 'exist-read-denied': {
+ 'open_func_name': 'mock_open_read_denied',
+ 'os_open_func_name': 'mock_os_open_read_denied',
+ },
+ 'exist-locked-read-denied': {
+ 'locking_pid': mock_other_pid,
+ 'open_func_name': 'mock_open_read_denied',
+ 'os_open_func_name': 'mock_os_open_read_denied',
+ },
+ 'exist-empty': {},
+ 'exist-invalid': {
+ 'pidfile': mock_pidfile_bogus,
+ },
+ 'exist-current-pid': {
+ 'pidfile': mock_pidfile_current_pid,
+ 'pidfile_pid': mock_current_pid,
+ },
+ 'exist-current-pid-locked': {
+ 'pidfile': mock_pidfile_current_pid,
+ 'pidfile_pid': mock_current_pid,
+ 'locking_pid': mock_current_pid,
+ },
+ 'exist-other-pid': {
+ 'pidfile': mock_pidfile_other_pid,
+ 'pidfile_pid': mock_other_pid,
+ },
+ 'exist-other-pid-locked': {
+ 'pidfile': mock_pidfile_other_pid,
+ 'pidfile_pid': mock_other_pid,
+ 'locking_pid': mock_other_pid,
+ },
+ }
+
+ for scenario in scenarios.values():
+ scenario['pid'] = mock_current_pid
+ scenario['path'] = mock_pidfile_path
+ if 'pidfile' not in scenario:
+ scenario['pidfile'] = mock_pidfile_empty
+ if 'pidfile_pid' not in scenario:
+ scenario['pidfile_pid'] = None
+ if 'locking_pid' not in scenario:
+ scenario['locking_pid'] = None
+ if 'open_func_name' not in scenario:
+ scenario['open_func_name'] = 'mock_open_okay'
+ if 'os_open_func_name' not in scenario:
+ scenario['os_open_func_name'] = 'mock_os_open_okay'
+
+ return scenarios
+
+
+def setup_pidfile_fixtures(testcase):
+ """ Set up common fixtures for PID file test cases. """
+ testcase.mock_tracker = scaffold.MockTracker()
+
+ scenarios = make_pidlockfile_scenarios()
+ testcase.pidlockfile_scenarios = scenarios
+
+ def get_scenario_option(testcase, key, default=None):
+ value = default
+ try:
+ value = testcase.scenario[key]
+ except (NameError, TypeError, AttributeError, KeyError):
+ pass
+ return value
+
+ scaffold.mock(
+ "os.getpid",
+ returns=scenarios['simple']['pid'],
+ tracker=testcase.mock_tracker)
+
+ def make_mock_open_funcs(testcase):
+
+ def mock_open_nonexist(filename, mode, buffering):
+ if 'r' in mode:
+ raise IOError(
+ errno.ENOENT, "No such file %(filename)r" % vars())
+ else:
+ result = testcase.scenario['pidfile']
+ return result
+
+ def mock_open_read_denied(filename, mode, buffering):
+ if 'r' in mode:
+ raise IOError(
+ errno.EPERM, "Read denied on %(filename)r" % vars())
+ else:
+ result = testcase.scenario['pidfile']
+ return result
+
+ def mock_open_okay(filename, mode, buffering):
+ result = testcase.scenario['pidfile']
+ return result
+
+ def mock_os_open_nonexist(filename, flags, mode):
+ if (flags & os.O_CREAT):
+ result = testcase.scenario['pidfile'].fileno()
+ else:
+ raise OSError(
+ errno.ENOENT, "No such file %(filename)r" % vars())
+ return result
+
+ def mock_os_open_read_denied(filename, flags, mode):
+ if (flags & os.O_CREAT):
+ result = testcase.scenario['pidfile'].fileno()
+ else:
+ raise OSError(
+ errno.EPERM, "Read denied on %(filename)r" % vars())
+ return result
+
+ def mock_os_open_okay(filename, flags, mode):
+ result = testcase.scenario['pidfile'].fileno()
+ return result
+
+ funcs = dict(
+ (name, obj) for (name, obj) in vars().items()
+ if hasattr(obj, '__call__'))
+
+ return funcs
+
+ testcase.mock_pidfile_open_funcs = make_mock_open_funcs(testcase)
+
+ def mock_open(filename, mode='r', buffering=None):
+ scenario_path = get_scenario_option(testcase, 'path')
+ if filename == scenario_path:
+ func_name = testcase.scenario['open_func_name']
+ mock_open_func = testcase.mock_pidfile_open_funcs[func_name]
+ result = mock_open_func(filename, mode, buffering)
+ else:
+ result = FakeFileDescriptorStringIO()
+ return result
+
+ scaffold.mock(
+ "__builtin__.open",
+ returns_func=mock_open,
+ tracker=testcase.mock_tracker)
+
+ def mock_os_open(filename, flags, mode=None):
+ scenario_path = get_scenario_option(testcase, 'path')
+ if filename == scenario_path:
+ func_name = testcase.scenario['os_open_func_name']
+ mock_os_open_func = testcase.mock_pidfile_open_funcs[func_name]
+ result = mock_os_open_func(filename, flags, mode)
+ else:
+ result = FakeFileDescriptorStringIO().fileno()
+ return result
+
+ scaffold.mock(
+ "os.open",
+ returns_func=mock_os_open,
+ tracker=testcase.mock_tracker)
+
+ def mock_os_fdopen(fd, mode='r', buffering=None):
+ scenario_pidfile = get_scenario_option(
+ testcase, 'pidfile', FakeFileDescriptorStringIO())
+ if fd == testcase.scenario['pidfile'].fileno():
+ result = testcase.scenario['pidfile']
+ else:
+ raise OSError(errno.EBADF, "Bad file descriptor")
+ return result
+
+ scaffold.mock(
+ "os.fdopen",
+ returns_func=mock_os_fdopen,
+ tracker=testcase.mock_tracker)
+
+ testcase.scenario = NotImplemented
+
+
+def setup_lockfile_method_mocks(testcase, scenario, class_name):
+ """ Set up common mock methods for lockfile class. """
+
+ def mock_read_pid():
+ return scenario['pidfile_pid']
+ def mock_is_locked():
+ return (scenario['locking_pid'] is not None)
+ def mock_i_am_locking():
+ return (
+ scenario['locking_pid'] == scenario['pid'])
+ def mock_acquire(timeout=None):
+ if scenario['locking_pid'] is not None:
+ raise lockfile.AlreadyLocked()
+ scenario['locking_pid'] = scenario['pid']
+ def mock_release():
+ if scenario['locking_pid'] is None:
+ raise lockfile.NotLocked()
+ if scenario['locking_pid'] != scenario['pid']:
+ raise lockfile.NotMyLock()
+ scenario['locking_pid'] = None
+ def mock_break_lock():
+ scenario['locking_pid'] = None
+
+ for func_name in [
+ 'read_pid',
+ 'is_locked', 'i_am_locking',
+ 'acquire', 'release', 'break_lock',
+ ]:
+ mock_func = vars()["mock_%(func_name)s" % vars()]
+ lockfile_func_name = "%(class_name)s.%(func_name)s" % vars()
+ mock_lockfile_func = scaffold.Mock(
+ lockfile_func_name,
+ returns_func=mock_func,
+ tracker=testcase.mock_tracker)
+ try:
+ scaffold.mock(
+ lockfile_func_name,
+ mock_obj=mock_lockfile_func,
+ tracker=testcase.mock_tracker)
+ except NameError:
+ pass
+
+
+def setup_pidlockfile_fixtures(testcase, scenario_name=None):
+ """ Set up common fixtures for PIDLockFile test cases. """
+
+ setup_pidfile_fixtures(testcase)
+
+ scaffold.mock(
+ "pidlockfile.write_pid_to_pidfile",
+ tracker=testcase.mock_tracker)
+ scaffold.mock(
+ "pidlockfile.remove_existing_pidfile",
+ tracker=testcase.mock_tracker)
+
+ if scenario_name is not None:
+ set_pidlockfile_scenario(testcase, scenario_name, clear_tracker=False)
+
+
+def set_pidlockfile_scenario(testcase, scenario_name, clear_tracker=True):
+ """ Set up the test case to the specified scenario. """
+ testcase.scenario = testcase.pidlockfile_scenarios[scenario_name]
+ setup_lockfile_method_mocks(
+ testcase, testcase.scenario, "lockfile.LinkFileLock")
+ testcase.pidlockfile_args = dict(
+ path=testcase.scenario['path'],
+ )
+ testcase.test_instance = pidlockfile.PIDLockFile(
+ **testcase.pidlockfile_args)
+ if clear_tracker:
+ testcase.mock_tracker.clear()
+
+
+class PIDLockFile_TestCase(scaffold.TestCase):
+ """ Test cases for PIDLockFile class. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ setup_pidlockfile_fixtures(self, 'exist-other-pid')
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_instantiate(self):
+ """ New instance of PIDLockFile should be created. """
+ instance = self.test_instance
+ self.failUnlessIsInstance(instance, pidlockfile.PIDLockFile)
+
+ def test_inherits_from_linkfilelock(self):
+ """ Should inherit from LinkFileLock. """
+ instance = self.test_instance
+ self.failUnlessIsInstance(instance, lockfile.LinkFileLock)
+
+ def test_has_specified_path(self):
+ """ Should have specified path. """
+ instance = self.test_instance
+ expect_path = self.scenario['path']
+ self.failUnlessEqual(expect_path, instance.path)
+
+
+class PIDLockFile_read_pid_TestCase(scaffold.TestCase):
+ """ Test cases for PIDLockFile.read_pid method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ setup_pidlockfile_fixtures(self, 'exist-other-pid')
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_gets_pid_via_read_pid_from_pidfile(self):
+ """ Should get PID via read_pid_from_pidfile. """
+ instance = self.test_instance
+ test_pid = self.scenario['pidfile_pid']
+ expect_pid = test_pid
+ result = instance.read_pid()
+ self.failUnlessEqual(expect_pid, result)
+
+
+class PIDLockFile_acquire_TestCase(scaffold.TestCase):
+ """ Test cases for PIDLockFile.acquire function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ setup_pidlockfile_fixtures(self)
+ set_pidlockfile_scenario(self, 'not-exist')
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_calls_linkfilelock_acquire(self):
+ """ Should first call LinkFileLock.acquire method. """
+ instance = self.test_instance
+ expect_mock_output = """\
+ Called lockfile.LinkFileLock.acquire()
+ ...
+ """
+ instance.acquire()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_calls_linkfilelock_acquire_with_timeout(self):
+ """ Should call LinkFileLock.acquire method with specified timeout. """
+ instance = self.test_instance
+ test_timeout = object()
+ expect_mock_output = """\
+ Called lockfile.LinkFileLock.acquire(timeout=%(test_timeout)r)
+ ...
+ """ % vars()
+ instance.acquire(timeout=test_timeout)
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_writes_pid_to_specified_file(self):
+ """ Should request writing current PID to specified file. """
+ instance = self.test_instance
+ pidfile_path = self.scenario['path']
+ expect_mock_output = """\
+ ...
+ Called pidlockfile.write_pid_to_pidfile(%(pidfile_path)r)
+ """ % vars()
+ instance.acquire()
+ scaffold.mock_restore()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_raises_lock_failed_on_write_error(self):
+ """ Should raise LockFailed error if write fails. """
+ set_pidlockfile_scenario(self, 'not-exist-write-busy')
+ instance = self.test_instance
+ pidfile_path = self.scenario['path']
+ mock_error = OSError(errno.EBUSY, "Bad stuff", pidfile_path)
+ pidlockfile.write_pid_to_pidfile.mock_raises = mock_error
+ expect_error = pidlockfile.LockFailed
+ self.failUnlessRaises(
+ expect_error,
+ instance.acquire)
+
+
+class PIDLockFile_release_TestCase(scaffold.TestCase):
+ """ Test cases for PIDLockFile.release function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ setup_pidlockfile_fixtures(self)
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_does_not_remove_existing_pidfile_if_not_locking(self):
+ """ Should not request removal of PID file if not locking. """
+ set_pidlockfile_scenario(self, 'exist-empty')
+ instance = self.test_instance
+ expect_error = lockfile.NotLocked
+ unwanted_mock_output = (
+ "..."
+ "Called pidlockfile.remove_existing_pidfile"
+ "...")
+ self.failUnlessRaises(
+ expect_error,
+ instance.release)
+ self.failIfMockCheckerMatch(unwanted_mock_output)
+
+ def test_does_not_remove_existing_pidfile_if_not_my_lock(self):
+ """ Should not request removal of PID file if we are not locking. """
+ set_pidlockfile_scenario(self, 'exist-other-pid-locked')
+ instance = self.test_instance
+ expect_error = lockfile.NotMyLock
+ unwanted_mock_output = (
+ "..."
+ "Called pidlockfile.remove_existing_pidfile"
+ "...")
+ self.failUnlessRaises(
+ expect_error,
+ instance.release)
+ self.failIfMockCheckerMatch(unwanted_mock_output)
+
+ def test_removes_existing_pidfile_if_i_am_locking(self):
+ """ Should request removal of specified PID file if lock is ours. """
+ set_pidlockfile_scenario(self, 'exist-current-pid-locked')
+ instance = self.test_instance
+ pidfile_path = self.scenario['path']
+ expect_mock_output = """\
+ ...
+ Called pidlockfile.remove_existing_pidfile(%(pidfile_path)r)
+ ...
+ """ % vars()
+ instance.release()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_calls_linkfilelock_release(self):
+ """ Should finally call LinkFileLock.release method. """
+ set_pidlockfile_scenario(self, 'exist-current-pid-locked')
+ instance = self.test_instance
+ expect_mock_output = """\
+ ...
+ Called lockfile.LinkFileLock.release()
+ """
+ instance.release()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+
+class PIDLockFile_break_lock_TestCase(scaffold.TestCase):
+ """ Test cases for PIDLockFile.break_lock function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ setup_pidlockfile_fixtures(self)
+ set_pidlockfile_scenario(self, 'exist-other-pid-locked')
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_calls_linkfilelock_break_lock(self):
+ """ Should first call LinkFileLock.break_lock method. """
+ instance = self.test_instance
+ expect_mock_output = """\
+ Called lockfile.LinkFileLock.break_lock()
+ ...
+ """
+ instance.break_lock()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_removes_existing_pidfile(self):
+ """ Should request removal of specified PID file. """
+ instance = self.test_instance
+ pidfile_path = self.scenario['path']
+ expect_mock_output = """\
+ ...
+ Called pidlockfile.remove_existing_pidfile(%(pidfile_path)r)
+ """ % vars()
+ instance.break_lock()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+
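+# Read together, the PIDLockFile test cases above describe a thin layer over
+# lockfile.LinkFileLock that keeps a PID file beside the lock: read the PID
+# with read_pid_from_pidfile, write it after a successful acquire (turning an
+# OSError into LockFailed), remove it on release only when this process holds
+# the lock, and remove it after break_lock. A minimal sketch, inferred from
+# these expectations rather than copied from daemon/pidlockfile.py:
+
+class _SketchPIDLockFile(lockfile.LinkFileLock, object):
+    """ Lock file that records the owning PID (illustrative only). """
+
+    def read_pid(self):
+        return pidlockfile.read_pid_from_pidfile(self.path)
+
+    def acquire(self, *args, **kwargs):
+        super(_SketchPIDLockFile, self).acquire(*args, **kwargs)
+        try:
+            pidlockfile.write_pid_to_pidfile(self.path)
+        except OSError, exc:
+            raise pidlockfile.LockFailed("%(exc)s" % vars())
+
+    def release(self):
+        if self.i_am_locking():
+            pidlockfile.remove_existing_pidfile(self.path)
+        super(_SketchPIDLockFile, self).release()
+
+    def break_lock(self):
+        super(_SketchPIDLockFile, self).break_lock()
+        pidlockfile.remove_existing_pidfile(self.path)
+
+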
+class read_pid_from_pidfile_TestCase(scaffold.TestCase):
+ """ Test cases for read_pid_from_pidfile function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ setup_pidfile_fixtures(self)
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_opens_specified_filename(self):
+ """ Should attempt to open specified pidfile filename. """
+ set_pidlockfile_scenario(self, 'exist-other-pid')
+ pidfile_path = self.scenario['path']
+ expect_mock_output = """\
+ Called __builtin__.open(%(pidfile_path)r, 'r')
+ """ % vars()
+ dummy = pidlockfile.read_pid_from_pidfile(pidfile_path)
+ scaffold.mock_restore()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_reads_pid_from_file(self):
+ """ Should read the PID from the specified file. """
+ set_pidlockfile_scenario(self, 'exist-other-pid')
+ pidfile_path = self.scenario['path']
+ expect_pid = self.scenario['pidfile_pid']
+ pid = pidlockfile.read_pid_from_pidfile(pidfile_path)
+ scaffold.mock_restore()
+ self.failUnlessEqual(expect_pid, pid)
+
+ def test_returns_none_when_file_nonexist(self):
+ """ Should return None when the PID file does not exist. """
+ set_pidlockfile_scenario(self, 'not-exist')
+ pidfile_path = self.scenario['path']
+ pid = pidlockfile.read_pid_from_pidfile(pidfile_path)
+ scaffold.mock_restore()
+ self.failUnlessIs(None, pid)
+
+ def test_raises_error_when_file_read_fails(self):
+ """ Should raise error when the PID file read fails. """
+ set_pidlockfile_scenario(self, 'exist-read-denied')
+ pidfile_path = self.scenario['path']
+ expect_error = EnvironmentError
+ self.failUnlessRaises(
+ expect_error,
+ pidlockfile.read_pid_from_pidfile, pidfile_path)
+
+ def test_raises_error_when_file_empty(self):
+ """ Should raise error when the PID file is empty. """
+ set_pidlockfile_scenario(self, 'exist-empty')
+ pidfile_path = self.scenario['path']
+ expect_error = pidlockfile.PIDFileParseError
+ self.failUnlessRaises(
+ expect_error,
+ pidlockfile.read_pid_from_pidfile, pidfile_path)
+
+ def test_raises_error_when_file_contents_invalid(self):
+ """ Should raise error when the PID file contents are invalid. """
+ set_pidlockfile_scenario(self, 'exist-invalid')
+ pidfile_path = self.scenario['path']
+ expect_error = pidlockfile.PIDFileParseError
+ self.failUnlessRaises(
+ expect_error,
+ pidlockfile.read_pid_from_pidfile, pidfile_path)
+
+
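+# The cases above define the contract: open the file for reading, parse its
+# contents as a decimal PID, return None when the file does not exist, and
+# raise PIDFileParseError when it exists but holds no usable PID. A minimal
+# sketch, inferred from these tests rather than copied from
+# daemon/pidlockfile.py:
+
+def _sketch_read_pid_from_pidfile(pidfile_path):
+    """ Read the PID recorded in the named PID file (illustrative only). """
+    try:
+        pidfile = open(pidfile_path, 'r')
+    except IOError, exc:
+        if exc.errno == errno.ENOENT:
+            return None
+        raise
+    line = pidfile.read().strip()
+    pidfile.close()
+    try:
+        pid = int(line)
+    except ValueError:
+        raise pidlockfile.PIDFileParseError(
+            "PID file %(pidfile_path)r contents invalid" % vars(), line)
+    return pid
+
+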
+class remove_existing_pidfile_TestCase(scaffold.TestCase):
+ """ Test cases for remove_existing_pidfile function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ setup_pidfile_fixtures(self)
+
+ scaffold.mock(
+ "os.remove",
+ tracker=self.mock_tracker)
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_removes_specified_filename(self):
+ """ Should attempt to remove specified PID file filename. """
+ set_pidlockfile_scenario(self, 'exist-current-pid')
+ pidfile_path = self.scenario['path']
+ expect_mock_output = """\
+ Called os.remove(%(pidfile_path)r)
+ """ % vars()
+ pidlockfile.remove_existing_pidfile(pidfile_path)
+ scaffold.mock_restore()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_ignores_file_not_exist_error(self):
+ """ Should ignore error if file does not exist. """
+ set_pidlockfile_scenario(self, 'not-exist')
+ pidfile_path = self.scenario['path']
+ mock_error = OSError(errno.ENOENT, "Not there", pidfile_path)
+ os.remove.mock_raises = mock_error
+ expect_mock_output = """\
+ Called os.remove(%(pidfile_path)r)
+ """ % vars()
+ pidlockfile.remove_existing_pidfile(pidfile_path)
+ scaffold.mock_restore()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_propagates_arbitrary_oserror(self):
+ """ Should propagate any OSError other than ENOENT. """
+ set_pidlockfile_scenario(self, 'exist-current-pid')
+ pidfile_path = self.scenario['path']
+ mock_error = OSError(errno.EACCES, "Denied", pidfile_path)
+ os.remove.mock_raises = mock_error
+ self.failUnlessRaises(
+ type(mock_error),
+ pidlockfile.remove_existing_pidfile,
+ pidfile_path)
+
+
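+# The three cases above make removal tolerant of a missing file: attempt
+# os.remove, treat ENOENT as success, and let any other OSError propagate.
+# A minimal sketch, inferred from these tests rather than copied from
+# daemon/pidlockfile.py:
+
+def _sketch_remove_existing_pidfile(pidfile_path):
+    """ Remove the named PID file if it exists (illustrative only). """
+    try:
+        os.remove(pidfile_path)
+    except OSError, exc:
+        if exc.errno != errno.ENOENT:
+            raise
+
+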
+class write_pid_to_pidfile_TestCase(scaffold.TestCase):
+ """ Test cases for write_pid_to_pidfile function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ setup_pidfile_fixtures(self)
+ set_pidlockfile_scenario(self, 'not-exist')
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_opens_specified_filename(self):
+ """ Should attempt to open specified PID file filename. """
+ pidfile_path = self.scenario['path']
+ expect_flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY)
+ expect_mode = 0644
+ expect_mock_output = """\
+ Called os.open(%(pidfile_path)r, %(expect_flags)r, %(expect_mode)r)
+ ...
+ """ % vars()
+ pidlockfile.write_pid_to_pidfile(pidfile_path)
+ scaffold.mock_restore()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_writes_pid_to_file(self):
+ """ Should write the current PID to the specified file. """
+ pidfile_path = self.scenario['path']
+ self.scenario['pidfile'].close = scaffold.Mock(
+ "PIDLockFile.close",
+ tracker=self.mock_tracker)
+ expect_line = "%(pid)d\n" % self.scenario
+ pidlockfile.write_pid_to_pidfile(pidfile_path)
+ scaffold.mock_restore()
+ self.failUnlessEqual(expect_line, self.scenario['pidfile'].getvalue())
+
+ def test_closes_file_after_write(self):
+ """ Should close the specified file after writing. """
+ pidfile_path = self.scenario['path']
+ self.scenario['pidfile'].write = scaffold.Mock(
+ "PIDLockFile.write",
+ tracker=self.mock_tracker)
+ self.scenario['pidfile'].close = scaffold.Mock(
+ "PIDLockFile.close",
+ tracker=self.mock_tracker)
+ expect_mock_output = """\
+ ...
+ Called PIDLockFile.write(...)
+ Called PIDLockFile.close()
+ """ % vars()
+ pidlockfile.write_pid_to_pidfile(pidfile_path)
+ scaffold.mock_restore()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+
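+# These tests spell out the write path: create the file exclusively with mode
+# 0644, wrap the descriptor in a file object, write the current PID followed
+# by a newline, and close it. A minimal sketch, inferred from these
+# expectations rather than copied from daemon/pidlockfile.py:
+
+def _sketch_write_pid_to_pidfile(pidfile_path):
+    """ Write the current PID to the named PID file (illustrative only). """
+    open_flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY)
+    open_mode = 0644
+    pidfile_fd = os.open(pidfile_path, open_flags, open_mode)
+    pidfile = os.fdopen(pidfile_fd, 'w')
+    pidfile.write("%d\n" % os.getpid())
+    pidfile.close()
+
+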
+class TimeoutPIDLockFile_TestCase(scaffold.TestCase):
+ """ Test cases for ‘TimeoutPIDLockFile’ class. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ self.mock_tracker = scaffold.MockTracker()
+
+ pidlockfile_scenarios = make_pidlockfile_scenarios()
+ self.pidlockfile_scenario = pidlockfile_scenarios['simple']
+ pidfile_path = self.pidlockfile_scenario['path']
+
+ scaffold.mock(
+ "pidlockfile.PIDLockFile.__init__",
+ tracker=self.mock_tracker)
+ scaffold.mock(
+ "pidlockfile.PIDLockFile.acquire",
+ tracker=self.mock_tracker)
+
+ self.scenario = {
+ 'pidfile_path': self.pidlockfile_scenario['path'],
+ 'acquire_timeout': object(),
+ }
+
+ self.test_kwargs = dict(
+ path=self.scenario['pidfile_path'],
+ acquire_timeout=self.scenario['acquire_timeout'],
+ )
+ self.test_instance = pidlockfile.TimeoutPIDLockFile(**self.test_kwargs)
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_inherits_from_pidlockfile(self):
+ """ Should inherit from PIDLockFile. """
+ instance = self.test_instance
+ self.failUnlessIsInstance(instance, pidlockfile.PIDLockFile)
+
+ def test_init_has_expected_signature(self):
+ """ Should have expected signature for ‘__init__’. """
+ def test_func(self, path, acquire_timeout=None, *args, **kwargs): pass
+ test_func.__name__ = '__init__'
+ self.failUnlessFunctionSignatureMatch(
+ test_func,
+ pidlockfile.TimeoutPIDLockFile.__init__)
+
+ def test_has_specified_acquire_timeout(self):
+ """ Should have specified ‘acquire_timeout’ value. """
+ instance = self.test_instance
+ expect_timeout = self.test_kwargs['acquire_timeout']
+ self.failUnlessEqual(expect_timeout, instance.acquire_timeout)
+
+ def test_calls_superclass_init(self):
+ """ Should call the superclass ‘__init__’. """
+ expect_path = self.test_kwargs['path']
+ expect_mock_output = """\
+ Called pidlockfile.PIDLockFile.__init__(
+ %(expect_path)r)
+ """ % vars()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_acquire_uses_specified_timeout(self):
+ """ Should call the superclass ‘acquire’ with specified timeout. """
+ instance = self.test_instance
+ test_timeout = object()
+ expect_timeout = test_timeout
+ self.mock_tracker.clear()
+ expect_mock_output = """\
+ Called pidlockfile.PIDLockFile.acquire(%(expect_timeout)r)
+ """ % vars()
+ instance.acquire(test_timeout)
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_acquire_uses_stored_timeout_by_default(self):
+ """ Should call superclass ‘acquire’ with stored timeout by default. """
+ instance = self.test_instance
+ test_timeout = self.test_kwargs['acquire_timeout']
+ expect_timeout = test_timeout
+ self.mock_tracker.clear()
+ expect_mock_output = """\
+ Called pidlockfile.PIDLockFile.acquire(%(expect_timeout)r)
+ """ % vars()
+ instance.acquire()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
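+
+
+# The test case above fixes the small amount of behaviour the subclass adds:
+# remember an acquire_timeout at construction, pass the remaining arguments to
+# PIDLockFile.__init__, and use the stored timeout whenever acquire is called
+# without one. A minimal sketch, inferred from these tests rather than copied
+# from daemon/pidlockfile.py:
+
+class _SketchTimeoutPIDLockFile(pidlockfile.PIDLockFile):
+    """ PIDLockFile with a default timeout for acquiring the lock. """
+
+    def __init__(self, path, acquire_timeout=None, *args, **kwargs):
+        self.acquire_timeout = acquire_timeout
+        super(_SketchTimeoutPIDLockFile, self).__init__(path, *args, **kwargs)
+
+    def acquire(self, timeout=None, *args, **kwargs):
+        if timeout is None:
+            timeout = self.acquire_timeout
+        super(_SketchTimeoutPIDLockFile, self).acquire(
+            timeout, *args, **kwargs)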
diff --git a/test/test_runner.py b/test/test_runner.py
new file mode 100644
index 0000000..11551ab
--- /dev/null
+++ b/test/test_runner.py
@@ -0,0 +1,662 @@
+# -*- coding: utf-8 -*-
+#
+# test/test_runner.py
+# Part of python-daemon, an implementation of PEP 3143.
+#
+# Copyright © 2009–2010 Ben Finney <ben+python@benfinney.id.au>
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Python Software Foundation License, version 2 or
+# later as published by the Python Software Foundation.
+# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
+
+""" Unit test for runner module.
+ """
+
+import __builtin__
+import os
+import sys
+import tempfile
+import errno
+import signal
+
+import scaffold
+from test_pidlockfile import (
+ FakeFileDescriptorStringIO,
+ setup_pidfile_fixtures,
+ make_pidlockfile_scenarios,
+ setup_lockfile_method_mocks,
+ )
+from test_daemon import (
+ setup_streams_fixtures,
+ )
+import daemon.daemon
+
+from daemon import pidlockfile
+from daemon import runner
+
+
+class Exception_TestCase(scaffold.Exception_TestCase):
+ """ Test cases for module exception classes. """
+
+ def __init__(self, *args, **kwargs):
+ """ Set up a new instance. """
+ super(Exception_TestCase, self).__init__(*args, **kwargs)
+
+ self.valid_exceptions = {
+ runner.DaemonRunnerError: dict(
+ min_args = 1,
+ types = (Exception,),
+ ),
+ runner.DaemonRunnerInvalidActionError: dict(
+ min_args = 1,
+ types = (runner.DaemonRunnerError, ValueError),
+ ),
+ runner.DaemonRunnerStartFailureError: dict(
+ min_args = 1,
+ types = (runner.DaemonRunnerError, RuntimeError),
+ ),
+ runner.DaemonRunnerStopFailureError: dict(
+ min_args = 1,
+ types = (runner.DaemonRunnerError, RuntimeError),
+ ),
+ }
+
+
+def make_runner_scenarios():
+ """ Make a collection of scenarios for testing DaemonRunner instances. """
+
+ pidlockfile_scenarios = make_pidlockfile_scenarios()
+
+ scenarios = {
+ 'simple': {
+ 'pidlockfile_scenario_name': 'simple',
+ },
+ 'pidfile-locked': {
+ 'pidlockfile_scenario_name': 'exist-other-pid-locked',
+ },
+ }
+
+ for scenario in scenarios.values():
+ if 'pidlockfile_scenario_name' in scenario:
+ pidlockfile_scenario = pidlockfile_scenarios.pop(
+ scenario['pidlockfile_scenario_name'])
+ scenario['pid'] = pidlockfile_scenario['pid']
+ scenario['pidfile_path'] = pidlockfile_scenario['path']
+ scenario['pidfile_timeout'] = 23
+ scenario['pidlockfile_scenario'] = pidlockfile_scenario
+
+ return scenarios
+
+
+def set_runner_scenario(testcase, scenario_name, clear_tracker=True):
+ """ Set the DaemonRunner test scenario for the test case. """
+ scenarios = testcase.runner_scenarios
+ testcase.scenario = scenarios[scenario_name]
+ set_pidlockfile_scenario(
+ testcase, testcase.scenario['pidlockfile_scenario_name'])
+ if clear_tracker:
+ testcase.mock_tracker.clear()
+
+
+def set_pidlockfile_scenario(testcase, scenario_name):
+ """ Set the PIDLockFile test scenario for the test case. """
+ scenarios = testcase.pidlockfile_scenarios
+ testcase.pidlockfile_scenario = scenarios[scenario_name]
+ setup_lockfile_method_mocks(
+ testcase, testcase.pidlockfile_scenario,
+ testcase.lockfile_class_name)
+
+
+def setup_runner_fixtures(testcase):
+ """ Set up common test fixtures for DaemonRunner test case. """
+ testcase.mock_tracker = scaffold.MockTracker()
+
+ setup_pidfile_fixtures(testcase)
+ setup_streams_fixtures(testcase)
+
+ testcase.runner_scenarios = make_runner_scenarios()
+
+ testcase.mock_stderr = FakeFileDescriptorStringIO()
+ scaffold.mock(
+ "sys.stderr",
+ mock_obj=testcase.mock_stderr,
+ tracker=testcase.mock_tracker)
+
+ simple_scenario = testcase.runner_scenarios['simple']
+
+ testcase.lockfile_class_name = "pidlockfile.TimeoutPIDLockFile"
+
+ testcase.mock_runner_lock = scaffold.Mock(
+ testcase.lockfile_class_name,
+ tracker=testcase.mock_tracker)
+ testcase.mock_runner_lock.path = simple_scenario['pidfile_path']
+
+ scaffold.mock(
+ testcase.lockfile_class_name,
+ returns=testcase.mock_runner_lock,
+ tracker=testcase.mock_tracker)
+
+ class TestApp(object):
+
+ def __init__(self):
+ self.stdin_path = testcase.stream_file_paths['stdin']
+ self.stdout_path = testcase.stream_file_paths['stdout']
+ self.stderr_path = testcase.stream_file_paths['stderr']
+ self.pidfile_path = simple_scenario['pidfile_path']
+ self.pidfile_timeout = simple_scenario['pidfile_timeout']
+
+ run = scaffold.Mock(
+ "TestApp.run",
+ tracker=testcase.mock_tracker)
+
+ testcase.TestApp = TestApp
+
+ scaffold.mock(
+ "daemon.runner.DaemonContext",
+ returns=scaffold.Mock(
+ "DaemonContext",
+ tracker=testcase.mock_tracker),
+ tracker=testcase.mock_tracker)
+
+ testcase.test_app = testcase.TestApp()
+
+ testcase.test_program_name = "bazprog"
+ testcase.test_program_path = (
+ "/foo/bar/%(test_program_name)s" % vars(testcase))
+ testcase.valid_argv_params = {
+ 'start': [testcase.test_program_path, 'start'],
+ 'stop': [testcase.test_program_path, 'stop'],
+ 'restart': [testcase.test_program_path, 'restart'],
+ }
+
+ def mock_open(filename, mode=None, buffering=None):
+ if filename in testcase.stream_files_by_path:
+ result = testcase.stream_files_by_path[filename]
+ else:
+ result = FakeFileDescriptorStringIO()
+ result.mode = mode
+ result.buffering = buffering
+ return result
+
+ scaffold.mock(
+ "__builtin__.open",
+ returns_func=mock_open,
+ tracker=testcase.mock_tracker)
+
+ scaffold.mock(
+ "os.kill",
+ tracker=testcase.mock_tracker)
+
+ scaffold.mock(
+ "sys.argv",
+ mock_obj=testcase.valid_argv_params['start'],
+ tracker=testcase.mock_tracker)
+
+ testcase.test_instance = runner.DaemonRunner(testcase.test_app)
+
+ testcase.scenario = NotImplemented
+
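+# The TestApp fixture above mirrors the minimal application interface that
+# ‘DaemonRunner’ consumes: stream path attributes, PID file settings, and a
+# ‘run’ method. A hedged sketch of a real application object (all names and
+# paths below are hypothetical):
+#
+#     import time
+#
+#     class ExampleApp(object):
+#         stdin_path = "/dev/null"
+#         stdout_path = "/var/log/exampled.log"
+#         stderr_path = "/var/log/exampled.log"
+#         pidfile_path = "/var/run/exampled.pid"
+#         pidfile_timeout = 5
+#
+#         def run(self):
+#             while True:
+#                 time.sleep(1)
+#
+#     runner.DaemonRunner(ExampleApp()).do_action()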
+
+class DaemonRunner_TestCase(scaffold.TestCase):
+ """ Test cases for DaemonRunner class. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ setup_runner_fixtures(self)
+ set_runner_scenario(self, 'simple')
+
+ scaffold.mock(
+ "runner.DaemonRunner.parse_args",
+ tracker=self.mock_tracker)
+
+ self.test_instance = runner.DaemonRunner(self.test_app)
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_instantiate(self):
+ """ New instance of DaemonRunner should be created. """
+ self.failUnlessIsInstance(self.test_instance, runner.DaemonRunner)
+
+ def test_parses_commandline_args(self):
+ """ Should parse commandline arguments. """
+ expect_mock_output = """\
+ Called runner.DaemonRunner.parse_args()
+ ...
+ """
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_has_specified_app(self):
+ """ Should have specified application object. """
+ self.failUnlessIs(self.test_app, self.test_instance.app)
+
+ def test_sets_pidfile_none_when_pidfile_path_is_none(self):
+ """ Should set ‘pidfile’ to ‘None’ when ‘pidfile_path’ is ‘None’. """
+ pidfile_path = None
+ self.test_app.pidfile_path = pidfile_path
+ expect_pidfile = None
+ instance = runner.DaemonRunner(self.test_app)
+ self.failUnlessIs(expect_pidfile, instance.pidfile)
+
+ def test_error_when_pidfile_path_not_string(self):
+ """ Should raise ValueError when PID file path not a string. """
+ pidfile_path = object()
+ self.test_app.pidfile_path = pidfile_path
+ expect_error = ValueError
+ self.failUnlessRaises(
+ expect_error,
+ runner.DaemonRunner, self.test_app)
+
+ def test_error_when_pidfile_path_not_absolute(self):
+ """ Should raise ValueError when PID file path not absolute. """
+ pidfile_path = "foo/bar.pid"
+ self.test_app.pidfile_path = pidfile_path
+ expect_error = ValueError
+ self.failUnlessRaises(
+ expect_error,
+ runner.DaemonRunner, self.test_app)
+
+ def test_creates_lock_with_specified_parameters(self):
+ """ Should create a TimeoutPIDLockFile with specified params. """
+ pidfile_path = self.scenario['pidfile_path']
+ pidfile_timeout = self.scenario['pidfile_timeout']
+ lockfile_class_name = self.lockfile_class_name
+ expect_mock_output = """\
+ ...
+ Called %(lockfile_class_name)s(
+ %(pidfile_path)r,
+ %(pidfile_timeout)r)
+ """ % vars()
+ scaffold.mock_restore()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_has_created_pidfile(self):
+ """ Should have new PID lock file as `pidfile` attribute. """
+ expect_pidfile = self.mock_runner_lock
+ instance = self.test_instance
+ self.failUnlessIs(
+ expect_pidfile, instance.pidfile)
+
+ def test_daemon_context_has_created_pidfile(self):
+ """ DaemonContext component should have new PID lock file. """
+ expect_pidfile = self.mock_runner_lock
+ daemon_context = self.test_instance.daemon_context
+ self.failUnlessIs(
+ expect_pidfile, daemon_context.pidfile)
+
+ def test_daemon_context_has_specified_stdin_stream(self):
+ """ DaemonContext component should have specified stdin file. """
+ test_app = self.test_app
+ expect_file = self.stream_files_by_name['stdin']
+ daemon_context = self.test_instance.daemon_context
+ self.failUnlessEqual(expect_file, daemon_context.stdin)
+
+ def test_daemon_context_has_stdin_in_read_mode(self):
+ """ DaemonContext component should open stdin file for read. """
+ expect_mode = 'r'
+ daemon_context = self.test_instance.daemon_context
+ self.failUnlessIn(daemon_context.stdin.mode, expect_mode)
+
+ def test_daemon_context_has_specified_stdout_stream(self):
+ """ DaemonContext component should have specified stdout file. """
+ test_app = self.test_app
+ expect_file = self.stream_files_by_name['stdout']
+ daemon_context = self.test_instance.daemon_context
+ self.failUnlessEqual(expect_file, daemon_context.stdout)
+
+ def test_daemon_context_has_stdout_in_append_mode(self):
+ """ DaemonContext component should open stdout file for append. """
+ expect_mode = 'w+'
+ daemon_context = self.test_instance.daemon_context
+ self.failUnlessIn(daemon_context.stdout.mode, expect_mode)
+
+ def test_daemon_context_has_specified_stderr_stream(self):
+ """ DaemonContext component should have specified stderr file. """
+ test_app = self.test_app
+ expect_file = self.stream_files_by_name['stderr']
+ daemon_context = self.test_instance.daemon_context
+ self.failUnlessEqual(expect_file, daemon_context.stderr)
+
+ def test_daemon_context_has_stderr_in_append_mode(self):
+ """ DaemonContext component should open stderr file for append. """
+ expect_mode = 'w+'
+ daemon_context = self.test_instance.daemon_context
+ self.failUnlessIn(daemon_context.stderr.mode, expect_mode)
+
+ def test_daemon_context_has_stderr_with_no_buffering(self):
+ """ DaemonContext component should open stderr file unbuffered. """
+ expect_buffering = 0
+ daemon_context = self.test_instance.daemon_context
+ self.failUnlessEqual(
+ expect_buffering, daemon_context.stderr.buffering)
+
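+# The stream tests above pin down the file modes the runner is expected to
+# use when wiring up its DaemonContext: stdin read-only, stdout and stderr
+# writable, stderr unbuffered. A hedged summary of the equivalent calls,
+# paraphrased from the assertions rather than copied from runner.py:
+#
+#     stdin = open(app.stdin_path, 'r')
+#     stdout = open(app.stdout_path, 'w+')
+#     stderr = open(app.stderr_path, 'w+', buffering=0)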
+
+class DaemonRunner_usage_exit_TestCase(scaffold.TestCase):
+ """ Test cases for DaemonRunner.usage_exit method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ setup_runner_fixtures(self)
+ set_runner_scenario(self, 'simple')
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_raises_system_exit(self):
+ """ Should raise SystemExit exception. """
+ instance = self.test_instance
+ argv = [self.test_program_path]
+ self.failUnlessRaises(
+ SystemExit,
+ instance._usage_exit, argv)
+
+ def test_message_follows_conventional_format(self):
+ """ Should emit a conventional usage message. """
+ instance = self.test_instance
+ progname = self.test_program_name
+ argv = [self.test_program_path]
+ expect_stderr_output = """\
+ usage: %(progname)s ...
+ """ % vars()
+ self.failUnlessRaises(
+ SystemExit,
+ instance._usage_exit, argv)
+ self.failUnlessOutputCheckerMatch(
+ expect_stderr_output, self.mock_stderr.getvalue())
+
+
+class DaemonRunner_parse_args_TestCase(scaffold.TestCase):
+ """ Test cases for DaemonRunner.parse_args method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ setup_runner_fixtures(self)
+ set_runner_scenario(self, 'simple')
+
+ scaffold.mock(
+ "daemon.runner.DaemonRunner._usage_exit",
+ raises=NotImplementedError,
+ tracker=self.mock_tracker)
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_emits_usage_message_if_insufficient_args(self):
+ """ Should emit a usage message and exit if too few arguments. """
+ instance = self.test_instance
+ argv = [self.test_program_path]
+ expect_mock_output = """\
+ Called daemon.runner.DaemonRunner._usage_exit(%(argv)r)
+ """ % vars()
+ try:
+ instance.parse_args(argv)
+ except NotImplementedError:
+ pass
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_emits_usage_message_if_unknown_action_arg(self):
+ """ Should emit a usage message and exit if unknown action. """
+ instance = self.test_instance
+ progname = self.test_program_name
+ argv = [self.test_program_path, 'bogus']
+ expect_mock_output = """\
+ Called daemon.runner.DaemonRunner._usage_exit(%(argv)r)
+ """ % vars()
+ try:
+ instance.parse_args(argv)
+ except NotImplementedError:
+ pass
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_should_parse_system_argv_by_default(self):
+ """ Should parse sys.argv by default. """
+ instance = self.test_instance
+ expect_action = 'start'
+ argv = self.valid_argv_params['start']
+ scaffold.mock(
+ "sys.argv",
+ mock_obj=argv,
+ tracker=self.mock_tracker)
+ instance.parse_args()
+ self.failUnlessEqual(expect_action, instance.action)
+
+ def test_sets_action_from_first_argument(self):
+ """ Should set action from first commandline argument. """
+ instance = self.test_instance
+ for name, argv in self.valid_argv_params.items():
+ expect_action = name
+ instance.parse_args(argv)
+ self.failUnlessEqual(expect_action, instance.action)
+
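+# These tests encode the conventional command-line contract for a program
+# built on DaemonRunner: the first positional argument names the action.
+# Illustrative invocations (the ‘exampled.py’ program name is hypothetical):
+#
+#     $ python exampled.py start
+#     $ python exampled.py stop
+#     $ python exampled.py restart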
+
+class DaemonRunner_do_action_TestCase(scaffold.TestCase):
+ """ Test cases for DaemonRunner.do_action method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ setup_runner_fixtures(self)
+ set_runner_scenario(self, 'simple')
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_raises_error_if_unknown_action(self):
+ """ Should emit a usage message and exit if action is unknown. """
+ instance = self.test_instance
+ instance.action = 'bogus'
+ expect_error = runner.DaemonRunnerInvalidActionError
+ self.failUnlessRaises(
+ expect_error,
+ instance.do_action)
+
+
+class DaemonRunner_do_action_start_TestCase(scaffold.TestCase):
+ """ Test cases for DaemonRunner.do_action method, action 'start'. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ setup_runner_fixtures(self)
+ set_runner_scenario(self, 'simple')
+
+ self.test_instance.action = 'start'
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_raises_error_if_pidfile_locked(self):
+ """ Should raise error if PID file is locked. """
+ set_pidlockfile_scenario(self, 'exist-other-pid-locked')
+ instance = self.test_instance
+ instance.daemon_context.open.mock_raises = (
+ pidlockfile.AlreadyLocked)
+ pidfile_path = self.scenario['pidfile_path']
+ expect_error = runner.DaemonRunnerStartFailureError
+ expect_message_content = pidfile_path
+ try:
+ instance.do_action()
+ except expect_error, exc:
+ pass
+ else:
+ raise self.failureException(
+ "Failed to raise " + expect_error.__name__)
+ self.failUnlessIn(str(exc), expect_message_content)
+
+ def test_breaks_lock_if_no_such_process(self):
+ """ Should request breaking lock if PID file process is not running. """
+ set_runner_scenario(self, 'pidfile-locked')
+ instance = self.test_instance
+ self.mock_runner_lock.read_pid.mock_returns = (
+ self.scenario['pidlockfile_scenario']['pidfile_pid'])
+ pidfile_path = self.scenario['pidfile_path']
+ test_pid = self.scenario['pidlockfile_scenario']['pidfile_pid']
+ expect_signal = signal.SIG_DFL
+ error = OSError(errno.ESRCH, "Not running")
+ os.kill.mock_raises = error
+ lockfile_class_name = self.lockfile_class_name
+ expect_mock_output = """\
+ ...
+ Called os.kill(%(test_pid)r, %(expect_signal)r)
+ Called %(lockfile_class_name)s.break_lock()
+ ...
+ """ % vars()
+ instance.do_action()
+ scaffold.mock_restore()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_requests_daemon_context_open(self):
+ """ Should request the daemon context to open. """
+ instance = self.test_instance
+ expect_mock_output = """\
+ ...
+ Called DaemonContext.open()
+ ...
+ """
+ instance.do_action()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_emits_start_message_to_stderr(self):
+ """ Should emit start message to stderr. """
+ instance = self.test_instance
+ current_pid = self.scenario['pid']
+ expect_stderr = """\
+ started with pid %(current_pid)d
+ """ % vars()
+ instance.do_action()
+ self.failUnlessOutputCheckerMatch(
+ expect_stderr, self.mock_stderr.getvalue())
+
+ def test_requests_app_run(self):
+ """ Should request the application to run. """
+ instance = self.test_instance
+ expect_mock_output = """\
+ ...
+ Called TestApp.run()
+ """
+ instance.do_action()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
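+# Read together, the ‘start’ tests above fix the expected sequence: break a
+# stale lock whose recorded PID is no longer running, open the DaemonContext
+# (an AlreadyLocked lock becomes DaemonRunnerStartFailureError), report the
+# new PID on stderr, then hand control to the application. A hedged sketch of
+# the calling side (‘ExampleApp’ as sketched earlier is illustrative):
+#
+#     app_runner = runner.DaemonRunner(ExampleApp())   # parses sys.argv
+#     app_runner.do_action()    # for 'start': daemonise, then ExampleApp.run()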
+
+class DaemonRunner_do_action_stop_TestCase(scaffold.TestCase):
+ """ Test cases for DaemonRunner.do_action method, action 'stop'. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ setup_runner_fixtures(self)
+ set_runner_scenario(self, 'pidfile-locked')
+
+ self.test_instance.action = 'stop'
+
+ self.mock_runner_lock.is_locked.mock_returns = True
+ self.mock_runner_lock.i_am_locking.mock_returns = False
+ self.mock_runner_lock.read_pid.mock_returns = (
+ self.scenario['pidlockfile_scenario']['pidfile_pid'])
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_raises_error_if_pidfile_not_locked(self):
+ """ Should raise error if PID file is not locked. """
+ set_runner_scenario(self, 'simple')
+ instance = self.test_instance
+ self.mock_runner_lock.is_locked.mock_returns = False
+ self.mock_runner_lock.i_am_locking.mock_returns = False
+ self.mock_runner_lock.read_pid.mock_returns = (
+ self.scenario['pidlockfile_scenario']['pidfile_pid'])
+ pidfile_path = self.scenario['pidfile_path']
+ expect_error = runner.DaemonRunnerStopFailureError
+ expect_message_content = pidfile_path
+ try:
+ instance.do_action()
+ except expect_error, exc:
+ pass
+ else:
+ raise self.failureException(
+ "Failed to raise " + expect_error.__name__)
+ scaffold.mock_restore()
+ self.failUnlessIn(str(exc), expect_message_content)
+
+ def test_breaks_lock_if_pidfile_stale(self):
+ """ Should break lock if PID file is stale. """
+ instance = self.test_instance
+ pidfile_path = self.scenario['pidfile_path']
+ test_pid = self.scenario['pidlockfile_scenario']['pidfile_pid']
+ expect_signal = signal.SIG_DFL
+ error = OSError(errno.ESRCH, "Not running")
+ os.kill.mock_raises = error
+ lockfile_class_name = self.lockfile_class_name
+ expect_mock_output = """\
+ ...
+ Called %(lockfile_class_name)s.break_lock()
+ """ % vars()
+ instance.do_action()
+ scaffold.mock_restore()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_sends_terminate_signal_to_process_from_pidfile(self):
+ """ Should send SIGTERM to the daemon process. """
+ instance = self.test_instance
+ test_pid = self.scenario['pidlockfile_scenario']['pidfile_pid']
+ expect_signal = signal.SIGTERM
+ expect_mock_output = """\
+ ...
+ Called os.kill(%(test_pid)r, %(expect_signal)r)
+ """ % vars()
+ instance.do_action()
+ scaffold.mock_restore()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
+
+ def test_raises_error_if_cannot_send_signal_to_process(self):
+ """ Should raise error if cannot send signal to daemon process. """
+ instance = self.test_instance
+ test_pid = self.scenario['pidlockfile_scenario']['pidfile_pid']
+ pidfile_path = self.scenario['pidfile_path']
+ error = OSError(errno.EPERM, "Nice try")
+ os.kill.mock_raises = error
+ expect_error = runner.DaemonRunnerStopFailureError
+ expect_message_content = str(test_pid)
+ try:
+ instance.do_action()
+ except expect_error, exc:
+ pass
+ else:
+ raise self.failureException(
+ "Failed to raise " + expect_error.__name__)
+ self.failUnlessIn(str(exc), expect_message_content)
+
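+# The ‘stop’ tests above pin down the expected behaviour: refuse to stop when
+# the PID file is not locked, break the lock when the recorded process is
+# gone, otherwise send SIGTERM and turn a delivery failure into
+# DaemonRunnerStopFailureError. A hedged structural sketch, paraphrased from
+# the assertions rather than copied from runner.py (‘process_is_gone’ is a
+# hypothetical helper, e.g. an os.kill probe that catches errno.ESRCH):
+#
+#     pid = pidfile.read_pid()
+#     if process_is_gone(pid):              # hypothetical ESRCH probe
+#         pidfile.break_lock()
+#     else:
+#         try:
+#             os.kill(pid, signal.SIGTERM)
+#         except OSError, exc:
+#             raise runner.DaemonRunnerStopFailureError(
+#                 "Failed to terminate %(pid)d: %(exc)s" % vars())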
+
+class DaemonRunner_do_action_restart_TestCase(scaffold.TestCase):
+ """ Test cases for DaemonRunner.do_action method, action 'restart'. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ setup_runner_fixtures(self)
+ set_runner_scenario(self, 'pidfile-locked')
+
+ self.test_instance.action = 'restart'
+
+ def tearDown(self):
+ """ Tear down test fixtures. """
+ scaffold.mock_restore()
+
+ def test_requests_stop_then_start(self):
+ """ Should request stop, then start. """
+ instance = self.test_instance
+ scaffold.mock(
+ "daemon.runner.DaemonRunner._start",
+ tracker=self.mock_tracker)
+ scaffold.mock(
+ "daemon.runner.DaemonRunner._stop",
+ tracker=self.mock_tracker)
+ expect_mock_output = """\
+ Called daemon.runner.DaemonRunner._stop()
+ Called daemon.runner.DaemonRunner._start()
+ """
+ instance.do_action()
+ self.failUnlessMockCheckerMatch(expect_mock_output)
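+
+
+# The restart test above fixes the contract that ‘restart’ is simply a stop
+# followed by a start. A hedged sketch of the calling side (‘app_runner’ is
+# illustrative):
+#
+#     app_runner.action = 'restart'
+#     app_runner.do_action()    # requests _stop(), then _start()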