2019-02-13 16:16:49 +00:00
|
|
|
# Copyright 2019, David Wilson
|
2017-11-19 13:50:26 +00:00
|
|
|
#
|
|
|
|
# Redistribution and use in source and binary forms, with or without
|
|
|
|
# modification, are permitted provided that the following conditions are met:
|
|
|
|
#
|
2018-02-27 11:28:26 +00:00
|
|
|
# 1. Redistributions of source code must retain the above copyright notice,
|
|
|
|
# this list of conditions and the following disclaimer.
|
2017-11-19 13:50:26 +00:00
|
|
|
#
|
|
|
|
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
|
|
|
# this list of conditions and the following disclaimer in the documentation
|
|
|
|
# and/or other materials provided with the distribution.
|
|
|
|
#
|
|
|
|
# 3. Neither the name of the copyright holder nor the names of its contributors
|
|
|
|
# may be used to endorse or promote products derived from this software without
|
|
|
|
# specific prior written permission.
|
|
|
|
#
|
2018-02-27 11:28:26 +00:00
|
|
|
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
|
|
|
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
|
|
|
|
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
|
|
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
|
|
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
|
|
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
|
|
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
|
|
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
|
|
# POSSIBILITY OF SUCH DAMAGE.
|
2017-11-19 13:50:26 +00:00
|
|
|
|
2019-02-01 00:58:42 +00:00
|
|
|
# !mitogen: minify_safe
|
|
|
|
|
2018-04-08 22:50:57 +00:00
|
|
|
"""
|
|
|
|
Helper functions intended to be executed on the target. These are entrypoints
|
|
|
|
for file transfer, module execution and sundry bits like changing file modes.
|
|
|
|
"""
|
|
|
|
|
issue #199: ansible: stop writing temp files for new style modules
While adding support for non-new style module types, NewStyleRunner
began writing modules to a temporary file, and sys.argv was patched to
actually include the script filename. The argv change was never required
to fix any particular bug, and a search of the standard modules reveals
no argv users. Update argv[0] to be '', like an interactive interpreter
would have.
While fixing #210, new style runner began setting __file__ to the
temporary file path in order to allow apt.py to discover the Ansiballz
temporary directory. 5 out of 1,516 standard modules follow this
pattern, but in each case, none actually attempt to access __file__,
they just call dirname on it. Therefore do not write the contents of
file, simply set it to the path as it would exist, within a real
temporary directory.
Finally move temporary directory creation out of runner and into target.
Now a single directory exists for the duration of a run, and is emptied
by runner.py as necessary after each task invocation.
This could be further extended to stop rewriting non-new-style modules
in a with_items loop, but that's another step.
Finally the last bullet point in the documentation almost isn't a lie
again.
2018-05-04 05:16:25 +00:00
|
|
|
import errno
|
2018-04-28 19:11:03 +00:00
|
|
|
import grp
|
2018-02-27 09:30:41 +00:00
|
|
|
import operator
|
2018-02-27 07:54:40 +00:00
|
|
|
import os
|
|
|
|
import pwd
|
2018-02-27 09:30:41 +00:00
|
|
|
import re
|
2018-06-10 01:11:41 +00:00
|
|
|
import signal
|
2018-02-27 09:30:41 +00:00
|
|
|
import stat
|
2017-09-15 06:24:41 +00:00
|
|
|
import subprocess
|
2018-07-27 20:32:55 +00:00
|
|
|
import sys
|
2018-04-01 14:58:44 +00:00
|
|
|
import tempfile
|
2018-04-20 13:20:05 +00:00
|
|
|
import traceback
|
2018-07-28 19:37:26 +00:00
|
|
|
import types
|
2018-03-26 05:58:06 +00:00
|
|
|
|
2019-01-23 12:44:08 +00:00
|
|
|
# Absolute imports for <2.5.
|
|
|
|
logging = __import__('logging')
|
|
|
|
|
2018-04-08 22:50:57 +00:00
|
|
|
import mitogen.core
|
|
|
|
import mitogen.fork
|
|
|
|
import mitogen.parent
|
2018-06-07 15:44:02 +00:00
|
|
|
import mitogen.service
|
2019-01-23 12:44:08 +00:00
|
|
|
from mitogen.core import b
|
|
|
|
|
|
|
|
try:
|
|
|
|
import json
|
|
|
|
except ImportError:
|
|
|
|
import simplejson as json
|
|
|
|
|
|
|
|
try:
|
|
|
|
reduce
|
2019-01-23 12:44:08 +00:00
|
|
|
except NameError:
|
|
|
|
# Python 3.x.
|
2019-01-23 12:44:08 +00:00
|
|
|
from functools import reduce
|
|
|
|
|
|
|
|
try:
|
|
|
|
BaseException
|
|
|
|
except NameError:
|
|
|
|
# Python 2.4
|
|
|
|
BaseException = Exception
|
|
|
|
|
2018-04-01 10:30:35 +00:00
|
|
|
|
2018-07-27 20:32:55 +00:00
|
|
|
# Ansible since PR #41749 inserts "import __main__" into
|
|
|
|
# ansible.module_utils.basic. Mitogen's importer will refuse such an import, so
|
|
|
|
# we must setup a fake "__main__" before that module is ever imported. The
|
|
|
|
# str() is to cast Unicode to bytes on Python 2.6.
|
|
|
|
if not sys.modules.get(str('__main__')):
|
2018-07-28 19:37:26 +00:00
|
|
|
sys.modules[str('__main__')] = types.ModuleType(str('__main__'))
|
2018-07-27 20:32:55 +00:00
|
|
|
|
|
|
|
import ansible.module_utils.json_utils
|
|
|
|
import ansible_mitogen.runner
|
|
|
|
|
2018-04-01 10:30:35 +00:00
|
|
|
|
|
|
|
LOG = logging.getLogger(__name__)
|
|
|
|
|
2018-08-19 14:59:35 +00:00
|
|
|
#: Message for the IOError raised by :func:`find_good_temp_dir` when no
#: candidate directory proved both writeable and executable. Interpolated
#: with a ``paths`` key listing every directory that was tried.
#: (Previously the template said ``%(namelist)s`` while the raise site
#: supplied ``{'paths': ...}``, turning the intended IOError into a
#: KeyError.)
MAKE_TEMP_FAILED_MSG = (
    u"Unable to find a useable temporary directory. This likely means no\n"
    u"system-supplied TMP directory can be written to, or all directories\n"
    u"were mounted on 'noexec' filesystems.\n"
    u"\n"
    u"The following paths were tried:\n"
    u"    %(paths)s\n"
    u"\n"
    u"Please check '-vvv' output for a log of individual path errors."
)
|
|
|
|
|
2019-01-27 03:00:46 +00:00
|
|
|
# Python 2.4/2.5 cannot support fork+threads whatsoever, it doesn't even fix up
# interpreter state. So 2.4/2.5 interpreters start .local() contexts for
# isolation instead. Since we don't have any crazy memory sharing problems to
# avoid, there is no virginal fork parent either. The child is started directly
# from the login/become process. In future this will be default everywhere,
# fork is brainwrong from the stone age.
FORK_SUPPORTED = sys.version_info >= (2, 6)

#: Initialized to an econtext.parent.Context pointing at a pristine fork of
#: the target Python interpreter before it executes any code or imports.
#: Assigned by :func:`init_child` when :data:`FORK_SUPPORTED`; otherwise it
#: remains :data:`None`.
_fork_parent = None

#: Set by :func:`init_child` to the name of a writeable and executable
#: temporary directory accessible by the active user account.
good_temp_dir = None
|
2018-08-19 17:50:53 +00:00
|
|
|
|
2018-02-14 15:42:14 +00:00
|
|
|
|
2018-12-08 02:41:04 +00:00
|
|
|
def subprocess__Popen__close_fds(self, but):
    """
    issue #362, #435: subprocess.Popen(close_fds=True) aka.
    AnsibleModule.run_command() loops the entire FD space on Python<3.2.
    CentOS>5 ships with 1,048,576 FDs by default, resulting in huge (>500ms)
    latency starting children. Therefore replace Popen._close_fds on Linux with
    a version that is O(fds) rather than O(_SC_OPEN_MAX).
    """
    try:
        entries = os.listdir(u'/proc/self/fd')
    except OSError:
        # May fail if acting on a container that does not have /proc mounted.
        # Fall back to the stock (slow) implementation in that case.
        self._original_close_fds(but)
        return

    for fd in (int(s, 10) for s in entries if s.isdigit()):
        if fd <= 2 or fd == but:
            # Leave stdio and the FD the child must inherit untouched.
            continue
        try:
            os.close(fd)
        except OSError:
            # Raced with the listdir snapshot; the FD is already gone.
            pass
|
|
|
|
|
|
|
|
|
|
|
|
# Install the fast close_fds implementation above, but only where it is both
# needed and applicable: Linux Python 2.x (Python 3 handles this internally),
# only when Popen still exposes the private _close_fds hook, and never in the
# master process, so the controller's subprocess behaviour is untouched.
if (
    sys.platform.startswith(u'linux') and
    sys.version < u'3.0' and
    hasattr(subprocess.Popen, u'_close_fds') and
    not mitogen.is_master
):
    # Keep the original bound method so the replacement can fall back to it
    # when /proc is unavailable.
    subprocess.Popen._original_close_fds = subprocess.Popen._close_fds
    subprocess.Popen._close_fds = subprocess__Popen__close_fds
|
2018-09-11 17:22:58 +00:00
|
|
|
|
|
|
|
|
2018-06-07 15:48:42 +00:00
|
|
|
def get_small_file(context, path):
    """
    Basic in-memory caching module fetcher. This generates one roundtrip for
    every previously unseen file, so it is only a temporary solution.

    :param context:
        Context we should direct FileService requests to. For now (and probably
        forever) this is just the top-level Mitogen connection manager process.
    :param path:
        Path to fetch from FileService, must previously have been registered by
        a privileged context using the `register` command.
    :returns:
        Bytestring file data.
    """
    service_pool = mitogen.service.get_or_create_pool(router=context.router)
    push_service = service_pool.get_service(u'mitogen.service.PushFileService')
    return push_service.get(path)
|
2018-04-01 10:30:35 +00:00
|
|
|
|
|
|
|
|
2018-04-28 19:11:03 +00:00
|
|
|
def transfer_file(context, in_path, out_path, sync=False, set_owner=False):
    """
    Streamily download a file from the connection multiplexer process in the
    controller.

    The file is written to a temporary sibling of `out_path` and atomically
    renamed over it on success; on any failure the temporary file is removed.

    :param mitogen.core.Context context:
        Reference to the context hosting the FileService that will transmit the
        file.
    :param bytes in_path:
        FileService registered name of the input file.
    :param bytes out_path:
        Name of the output path on the local disk.
    :param bool sync:
        If :data:`True`, ensure the file content and metadata are fully on disk
        before renaming the temporary file over the existing file. This should
        ensure in the case of system crash, either the entire old or new file
        are visible post-reboot.
    :param bool set_owner:
        If :data:`True`, look up the metadata username and group on the local
        system and set them as the file's owner.
    """
    out_path = os.path.abspath(out_path)

    fd, tmp_path = tempfile.mkstemp(suffix='.tmp',
                                    prefix='.ansible_mitogen_transfer-',
                                    dir=os.path.dirname(out_path))
    fp = os.fdopen(fd, 'wb', mitogen.core.CHUNK_SIZE)
    LOG.debug('transfer_file(%r) temporary file: %s', out_path, tmp_path)

    try:
        try:
            ok, metadata = mitogen.service.FileService.get(
                context=context,
                path=in_path,
                out_fp=fp,
            )
            if not ok:
                raise IOError('transfer of %r was interrupted.' % (in_path,))

            set_file_mode(tmp_path, metadata['mode'], fd=fp.fileno())
            if set_owner:
                set_file_owner(tmp_path, metadata['owner'], metadata['group'],
                               fd=fp.fileno())
            if sync:
                # Flush content to stable storage while the descriptor is
                # still open. Previously this ran after fp.close(), so
                # fp.fileno() raised on every sync=True transfer and the
                # downloaded file was discarded by the except handler.
                os.fsync(fp.fileno())
        finally:
            fp.close()

        os.rename(tmp_path, out_path)
    except BaseException:
        # Remove the partial temporary file before propagating the error.
        os.unlink(tmp_path)
        raise

    os.utime(out_path, (metadata['atime'], metadata['mtime']))
|
2018-04-22 01:48:06 +00:00
|
|
|
|
|
|
|
|
issue #199: ansible: stop writing temp files for new style modules
While adding support for non-new style module types, NewStyleRunner
began writing modules to a temporary file, and sys.argv was patched to
actually include the script filename. The argv change was never required
to fix any particular bug, and a search of the standard modules reveals
no argv users. Update argv[0] to be '', like an interactive interpreter
would have.
While fixing #210, new style runner began setting __file__ to the
temporary file path in order to allow apt.py to discover the Ansiballz
temporary directory. 5 out of 1,516 standard modules follow this
pattern, but in each case, none actually attempt to access __file__,
they just call dirname on it. Therefore do not write the contents of
file, simply set it to the path as it would exist, within a real
temporary directory.
Finally move temporary directory creation out of runner and into target.
Now a single directory exists for the duration of a run, and is emptied
by runner.py as necessary after each task invocation.
This could be further extended to stop rewriting non-new-style modules
in a with_items loop, but that's another step.
Finally the last bullet point in the documentation almost isn't a lie
again.
2018-05-04 05:16:25 +00:00
|
|
|
def prune_tree(path):
    """
    Like shutil.rmtree(), but log errors rather than discard them, and do not
    waste multiple os.stat() calls discovering whether the object can be
    deleted, just try deleting it instead.
    """
    # Optimistically treat `path` as a plain file first.
    try:
        os.unlink(path)
        return
    except OSError:
        e = sys.exc_info()[1]
        # Only continue into directory handling when unlink failed because
        # `path` is a directory; any other failure is terminal.
        looks_like_dir = (
            os.path.isdir(path) and
            e.args[0] in (errno.EPERM, errno.EISDIR)
        )
        if not looks_like_dir:
            LOG.error('prune_tree(%r): %s', path, e)
            return

    try:
        # Ensure write access for readonly directories. Ignore error in case
        # path is on a weird filesystem (e.g. vfat).
        os.chmod(path, int('0700', 8))
    except OSError:
        e = sys.exc_info()[1]
        LOG.warning('prune_tree(%r): %s', path, e)

    try:
        for entry in os.listdir(path):
            if entry in ('.', '..'):
                continue
            prune_tree(os.path.join(path, entry))
        os.rmdir(path)
    except OSError:
        e = sys.exc_info()[1]
        LOG.error('prune_tree(%r): %s', path, e)
|
|
|
|
|
|
|
|
|
|
|
|
def _on_broker_shutdown():
    """
    Respond to broker shutdown (graceful termination by parent, or loss of
    connection to parent) by deleting our sole temporary directory.
    """
    # NOTE(review): `temp_dir` is not assigned anywhere visible in this module
    # (only `good_temp_dir` exists) -- as written this handler would raise
    # NameError if invoked. Confirm where `temp_dir` is defined, or whether
    # this function is stale, before relying on it.
    prune_tree(temp_dir)
|
|
|
|
|
|
|
|
|
2018-10-02 20:06:00 +00:00
|
|
|
def is_good_temp_dir(path):
    """
    Return :data:`True` if `path` can be used as a temporary directory, logging
    any failures that may cause it to be unsuitable. If the directory doesn't
    exist, we attempt to create it using :func:`os.makedirs`.
    """
    # Create the directory if needed; failure to create disqualifies it.
    if not os.path.exists(path):
        try:
            os.makedirs(path, mode=int('0700', 8))
        except OSError:
            e = sys.exc_info()[1]
            LOG.debug('temp dir %r unusable: did not exist and attempting '
                      'to create it failed: %s', path, e)
            return False

    # Probe writeability by creating a real file inside the directory.
    try:
        probe = tempfile.NamedTemporaryFile(
            prefix='ansible_mitogen_is_good_temp_dir',
            dir=path,
        )
    except (OSError, IOError):
        e = sys.exc_info()[1]
        LOG.debug('temp dir %r unusable: %s', path, e)
        return False

    try:
        # The probe must accept an executable permission bit...
        try:
            os.chmod(probe.name, int('0700', 8))
        except OSError:
            e = sys.exc_info()[1]
            LOG.debug('temp dir %r unusable: chmod failed: %s', path, e)
            return False

        # ...and that bit must actually be honoured by the filesystem.
        try:
            # access(.., X_OK) is sufficient to detect noexec.
            if not os.access(probe.name, os.X_OK):
                raise OSError('filesystem appears to be mounted noexec')
        except OSError:
            e = sys.exc_info()[1]
            LOG.debug('temp dir %r unusable: %s', path, e)
            return False
    finally:
        probe.close()

    return True
|
|
|
|
|
|
|
|
|
2018-09-11 02:44:17 +00:00
|
|
|
def find_good_temp_dir(candidate_temp_dirs):
    """
    Given a list of candidate temp directories extracted from ``ansible.cfg``,
    combine it with the Python-builtin list of candidate directories used by
    :mod:`tempfile`, then iteratively try each until one is found that is both
    writeable and executable.

    :param list candidate_temp_dirs:
        List of candidate $variable-expanded and tilde-expanded directory paths
        that may be usable as a temporary directory.
    :raises IOError:
        No candidate directory was usable.
    """
    paths = [
        os.path.expandvars(os.path.expanduser(p))
        for p in candidate_temp_dirs
    ]
    # Append the interpreter's own fallback candidates (TMPDIR, /tmp, ...).
    paths.extend(tempfile._candidate_tempdir_list())

    for candidate in paths:
        if is_good_temp_dir(candidate):
            LOG.debug('Selected temp directory: %r (from %r)',
                      candidate, paths)
            return candidate

    raise IOError(MAKE_TEMP_FAILED_MSG % {
        'paths': '\n    '.join(paths),
    })
|
|
|
|
|
|
|
|
|
2018-04-08 22:50:57 +00:00
|
|
|
@mitogen.core.takes_econtext
def init_child(econtext, log_level, candidate_temp_dirs):
    """
    Called by ContextService immediately after connection; arranges for the
    (presently) spotless Python interpreter to be forked, where the newly
    forked interpreter becomes the parent of any newly forked future
    interpreters.

    This is necessary to prevent modules that are executed in-process from
    polluting the global interpreter state in a way that effects explicitly
    isolated modules.

    :param int log_level:
        Logging package level active in the master.
    :param list[str] candidate_temp_dirs:
        List of $variable-expanded and tilde-expanded directory names to add to
        candidate list of temporary directories.

    :returns:
        Dict like::

            {
                'fork_context': mitogen.core.Context or None,
                'good_temp_dir': ...
                'home_dir': str
            }

        Where `fork_context` refers to the newly forked 'fork parent' context
        the controller will use to start forked jobs, and `home_dir` is the
        home directory for the active user account.
    """
    global _fork_parent, good_temp_dir

    # Copying the master's log level causes log messages to be filtered before
    # they reach LogForwarder, thus reducing an influx of tiny messages waking
    # the connection multiplexer process in the master.
    LOG.setLevel(log_level)
    logging.getLogger('ansible_mitogen').setLevel(log_level)

    # issue #536: if the json module is available, remove simplejson from the
    # importer whitelist to avoid confusing certain Ansible modules.
    if json.__name__ == 'json':
        econtext.importer.whitelist.remove('simplejson')

    if FORK_SUPPORTED:
        mitogen.parent.upgrade_router(econtext)
        _fork_parent = econtext.router.fork()

    good_temp_dir = find_good_temp_dir(candidate_temp_dirs)

    return {
        u'fork_context': _fork_parent,
        u'home_dir': mitogen.core.to_text(os.path.expanduser('~')),
        u'good_temp_dir': good_temp_dir,
    }
|
|
|
|
|
2018-04-08 22:50:57 +00:00
|
|
|
|
|
|
|
@mitogen.core.takes_econtext
def spawn_isolated_child(econtext):
    """
    For helper functions executed in the fork parent context, arrange for
    the context's router to be upgraded as necessary and for a new child to be
    prepared.

    The actual fork occurs from the 'virginal fork parent', which does not have
    any Ansible modules loaded prior to fork, to avoid conflicts resulting from
    custom module_utils paths.

    :returns:
        The newly started child context.
    """
    mitogen.parent.upgrade_router(econtext)
    if FORK_SUPPORTED:
        context = econtext.router.fork()
    else:
        # Pre-2.6 interpreters cannot fork+thread safely; start a fresh
        # local child instead (see FORK_SUPPORTED).
        context = econtext.router.local()
    # Fixed stale log text: this function is spawn_isolated_child(), the old
    # 'create_fork_child()' name made the debug log misleading.
    LOG.debug('spawn_isolated_child() -> %r', context)
    return context
|
2018-04-08 22:50:57 +00:00
|
|
|
|
|
|
|
|
2018-06-09 21:11:26 +00:00
|
|
|
def run_module(kwargs):
    """
    Set up the process environment in preparation for running an Ansible
    module. This monkey-patches the Ansible libraries in various places to
    prevent it from trying to kill the process on completion, and to prevent it
    from reading sys.stdin.
    """
    # 'runner_name' selects which Runner subclass from ansible_mitogen.runner
    # handles this invocation; the remaining kwargs are its constructor args.
    runner_name = kwargs.pop('runner_name')
    runner_klass = getattr(ansible_mitogen.runner, runner_name)
    runner = runner_klass(**mitogen.core.Kwargs(kwargs))
    return runner.run()
|
|
|
|
|
|
|
|
|
|
|
|
def _get_async_dir():
|
|
|
|
return os.path.expanduser(
|
|
|
|
os.environ.get('ANSIBLE_ASYNC_DIR', '~/.ansible_async')
|
|
|
|
)
|
|
|
|
|
|
|
|
|
2018-06-10 14:27:06 +00:00
|
|
|
class AsyncRunner(object):
    """
    Implements Ansible's async task contract on the target: runs a module via
    :func:`run_module` while maintaining a JSON status file in the async
    directory, enforcing an optional time limit via SIGALRM, and shutting
    down this (forked) process's broker when done.
    """
    def __init__(self, job_id, timeout_secs, started_sender, econtext, kwargs):
        # job_id: string identifier; also the status file's basename.
        self.job_id = job_id
        # timeout_secs: >0 enables the SIGALRM-based time limit.
        self.timeout_secs = timeout_secs
        # started_sender: receives True once the initial job file is written.
        self.started_sender = started_sender
        self.econtext = econtext
        # kwargs: runner keyword arguments forwarded to run_module().
        self.kwargs = kwargs
        # Set by _on_sigalrm() so _run() skips result parsing after timeout.
        self._timed_out = False
        self._init_path()

    def _init_path(self):
        """
        Compute self.path, the job status file path, creating the async
        directory if it does not yet exist.
        """
        async_dir = _get_async_dir()
        if not os.path.exists(async_dir):
            os.makedirs(async_dir)
        self.path = os.path.join(async_dir, self.job_id)

    def _update(self, dct):
        """
        Update an async job status file.
        """
        LOG.info('%r._update(%r, %r)', self, self.job_id, dct)
        # Keys Ansible's async_status expects to always be present.
        dct.setdefault('ansible_job_id', self.job_id)
        dct.setdefault('data', '')

        # Write to a sibling temp file then rename, so readers never observe
        # a partially written status file.
        fp = open(self.path + '.tmp', 'w')
        try:
            fp.write(json.dumps(dct))
        finally:
            fp.close()
        os.rename(self.path + '.tmp', self.path)

    def _on_sigalrm(self, signum, frame):
        """
        Respond to SIGALRM (job timeout) by updating the job file and killing
        the process.
        """
        msg = "Job reached maximum time limit of %d seconds." % (
            self.timeout_secs,
        )
        self._update({
            "failed": 1,
            "finished": 1,
            "msg": msg,
        })
        self._timed_out = True
        self.econtext.broker.shutdown()

    def _install_alarm(self):
        """
        Arm the one-shot timeout alarm; fires _on_sigalrm() after
        timeout_secs.
        """
        signal.signal(signal.SIGALRM, self._on_sigalrm)
        signal.alarm(self.timeout_secs)

    def _run_module(self):
        """
        Invoke run_module() with detach/tty settings appropriate for an
        async invocation, returning its result dict.
        """
        kwargs = dict(self.kwargs, **{
            'detach': True,
            'econtext': self.econtext,
            'emulate_tty': False,
        })
        return run_module(kwargs)

    def _parse_result(self, dct):
        """
        Extract the module's JSON result from its stdout (skipping any
        non-JSON noise lines) and record it in the status file.
        """
        filtered, warnings = (
            ansible.module_utils.json_utils.
            _filter_non_json_lines(dct['stdout'])
        )
        result = json.loads(filtered)
        result.setdefault('warnings', []).extend(warnings)
        result['stderr'] = dct['stderr'] or result.get('stderr', '')
        self._update(result)

    def _run(self):
        """
        1. Immediately updates the status file to mark the job as started.
        2. Installs a timer/signal handler to implement the time limit.
        3. Runs as with run_module(), writing the result to the status file.
        """
        self._update({
            'started': 1,
            'finished': 0,
            'pid': os.getpid()
        })
        # Unblock the controller: the initial job file now exists.
        self.started_sender.send(True)

        if self.timeout_secs > 0:
            self._install_alarm()

        dct = self._run_module()
        if not self._timed_out:
            # After SIGALRM fires, there is a window between broker responding
            # to shutdown() by killing the process, and work continuing on the
            # main thread. If main thread was asleep in at least
            # basic.py/select.select(), an EINTR will be raised. We want to
            # discard that exception.
            try:
                self._parse_result(dct)
            except Exception:
                self._update({
                    "failed": 1,
                    "msg": traceback.format_exc(),
                    "data": dct['stdout'],  # temporary notice only
                    "stderr": dct['stderr']
                })

    def run(self):
        """
        Run the job, recording any unexpected failure in the status file, and
        always shutting the broker down afterwards so the forked process
        exits.
        """
        try:
            try:
                self._run()
            except Exception:
                self._update({
                    "failed": 1,
                    "msg": traceback.format_exc(),
                })
        finally:
            self.econtext.broker.shutdown()
|
|
|
|
|
2018-04-20 13:20:05 +00:00
|
|
|
|
|
|
|
@mitogen.core.takes_econtext
def run_module_async(kwargs, job_id, timeout_secs, started_sender, econtext):
    """
    Execute a module with its run status and result written to a file,
    terminating on the process on completion. This function must run in a child
    forked using :func:`create_fork_child`.

    :param mitogen.core.Sender started_sender:
        A sender that will receive :data:`True` once the job has reached a
        point where its initial job file has been written. This is required to
        avoid a race where an overly eager controller can check for a task
        before it has reached that point in execution, which is possible at
        least on Python 2.4, where forking is not available for async tasks.
    """
    runner = AsyncRunner(
        job_id,
        timeout_secs,
        started_sender,
        econtext,
        kwargs
    )
    runner.run()
|
2018-02-27 15:16:05 +00:00
|
|
|
|
|
|
|
|
2018-02-27 07:54:40 +00:00
|
|
|
def get_user_shell():
    """
    Return the current user's login shell.

    For commands executed directly via an SSH command-line, SSH looks up the
    user's shell via getpwuid() and only defaults to /bin/sh if that field is
    missing or empty; replicate that behaviour here.
    """
    try:
        shell = pwd.getpwuid(os.geteuid()).pw_shell
    except KeyError:
        # No passwd entry for the effective UID.
        shell = None

    if shell:
        return shell
    return '/bin/sh'
|
|
|
|
|
2018-02-17 09:45:39 +00:00
|
|
|
|
2018-04-04 23:41:14 +00:00
|
|
|
def exec_args(args, in_data='', chdir=None, shell=None, emulate_tty=False):
    """
    Run a command in a subprocess, emulating the argument handling behaviour
    of SSH.

    :param list args:
        Argument vector.
    :param bytes in_data:
        Optional standard input for the command.
    :param str chdir:
        Optional working directory for the child process.
    :param shell:
        Unused here; accepted for signature compatibility with callers.
    :param bool emulate_tty:
        If :data:`True`, arrange for stdout and stderr to be merged into the
        stdout pipe and for LF to be translated into CRLF, emulating the
        behaviour of a TTY.
    :return:
        (return code, stdout bytes, stderr bytes)
    """
    LOG.debug('exec_args(%r, ..., chdir=%r)', args, chdir)
    # NOTE(review): assert is stripped under -O; kept as-is since callers may
    # not expect a different exception type here.
    assert isinstance(args, list)

    # With TTY emulation, the child's stderr is folded into its stdout pipe.
    stderr_dest = subprocess.STDOUT if emulate_tty else subprocess.PIPE

    process = subprocess.Popen(
        args=args,
        stdout=subprocess.PIPE,
        stderr=stderr_dest,
        stdin=subprocess.PIPE,
        cwd=chdir,
    )
    stdout, stderr = process.communicate(in_data)

    if emulate_tty:
        stdout = stdout.replace(b('\n'), b('\r\n'))
    return process.returncode, stdout, stderr or b('')
|
2017-09-15 06:24:41 +00:00
|
|
|
|
|
|
|
|
2018-04-04 23:41:14 +00:00
|
|
|
def exec_command(cmd, in_data='', chdir=None, shell=None, emulate_tty=False):
    """
    Run a shell command in a subprocess, emulating the argument handling
    behaviour of SSH by handing the command line to the user's login shell.

    :param bytes cmd:
        String command line, passed to user's shell.
    :param bytes in_data:
        Optional standard input for the command.
    :return:
        (return code, stdout bytes, stderr bytes)
    """
    assert isinstance(cmd, mitogen.core.UnicodeType)
    # SSH invokes the user's shell with the command as a single -c argument.
    argv = [get_user_shell(), '-c', cmd]
    return exec_args(
        args=argv,
        in_data=in_data,
        chdir=chdir,
        shell=shell,
        emulate_tty=emulate_tty,
    )
|
|
|
|
|
|
|
|
|
2017-09-15 06:24:41 +00:00
|
|
|
def read_path(path):
    """
    Fetch the contents of a filesystem `path` as bytes.
    """
    # Close the descriptor explicitly rather than relying on garbage
    # collection of the file object; try/finally (not `with`) keeps the
    # file's Python 2.4 compatibility.
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
|
2017-09-15 06:24:41 +00:00
|
|
|
|
|
|
|
|
2019-01-23 12:44:08 +00:00
|
|
|
def set_file_owner(path, owner, group=None, fd=None):
    """
    Change the owner and group of a file.

    :param str path:
        Path used when only chown() is available.
    :param str owner:
        Owner username; falls back to the current effective UID if empty.
    :param str group:
        Group name; falls back to the current effective GID if empty.
    :param int fd:
        Optional open descriptor for the same file; when present and
        fchown() exists, it is preferred over operating on the path.
    :raises KeyError:
        `owner` or `group` has no passwd/group database entry.
    """
    if owner:
        uid = pwd.getpwnam(owner).pw_uid
    else:
        uid = os.geteuid()

    if group:
        gid = grp.getgrnam(group).gr_gid
    else:
        gid = os.getegid()

    # Bug fix: os.fchown() and os.chown() take uid and gid as two separate
    # arguments; the original passed a single (uid, gid) tuple, which raised
    # TypeError on every call.
    if fd is not None and hasattr(os, 'fchown'):
        os.fchown(fd, uid, gid)
    else:
        # Python<2.6 lacks os.fchown().
        os.chown(path, uid, gid)
|
2018-04-28 19:11:03 +00:00
|
|
|
|
|
|
|
|
|
|
|
def write_path(path, s, owner=None, group=None, mode=None,
               utimes=None, sync=False):
    """
    Atomically write bytes `s` to a filesystem `path`: data is written to a
    uniquely named temporary file in the same directory, then renamed over
    `path`. On any failure the temporary file is removed.

    :param str path:
        Destination path.
    :param bytes s:
        File contents.
    :param str owner:
        Optional owner username applied via :func:`set_file_owner`.
    :param str group:
        Optional group name applied via :func:`set_file_owner`.
    :param mode:
        Optional permissions: an int, or a chmod(1)-style string, applied
        via :func:`set_file_mode`.
    :param tuple utimes:
        Optional (atime, mtime) pair applied after the rename.
    :param bool sync:
        If :data:`True`, flush and fsync the file before renaming it.
    """
    path = os.path.abspath(path)
    fd, tmp_path = tempfile.mkstemp(suffix='.tmp',
                                    prefix='.ansible_mitogen_transfer-',
                                    dir=os.path.dirname(path))
    fp = os.fdopen(fd, 'wb', mitogen.core.CHUNK_SIZE)
    LOG.debug('write_path(path=%r) temporary file: %s', path, tmp_path)

    try:
        try:
            if mode:
                set_file_mode(tmp_path, mode, fd=fp.fileno())
            if owner or group:
                set_file_owner(tmp_path, owner, group, fd=fp.fileno())
            fp.write(s)
            if sync:
                # Bug fix: fsync must happen while the descriptor is still
                # open. The original called fp.fileno() after fp.close(),
                # raising ValueError whenever sync=True. Flush Python-level
                # buffers first so fsync persists the full contents.
                fp.flush()
                os.fsync(fp.fileno())
        finally:
            fp.close()
        os.rename(tmp_path, path)
    except BaseException:
        os.unlink(tmp_path)
        raise

    if utimes:
        os.utime(path, utimes)
|
2018-02-27 09:30:41 +00:00
|
|
|
|
|
|
|
|
2018-02-27 12:11:25 +00:00
|
|
|
# Matches one comma-separated chmod(1)-style clause, e.g. "u+rwx" or "go-w":
# an optional run of target classes, a single operator, then permission
# letters (or a single class letter for the copy-permissions form).
CHMOD_CLAUSE_PAT = re.compile(r'([uoga]*)([+\-=])([ugo]|[rwx]*)')
# Full rwx bitmask for each target class; 'a' covers user, group and other.
CHMOD_MASKS = {
    'u': stat.S_IRWXU,
    'g': stat.S_IRWXG,
    'o': stat.S_IRWXO,
    'a': (stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO),
}
# Individual permission bit for each (class, permission letter) pair; the 'a'
# entries set the corresponding bit in all three classes at once.
CHMOD_BITS = {
    'u': {'r': stat.S_IRUSR, 'w': stat.S_IWUSR, 'x': stat.S_IXUSR},
    'g': {'r': stat.S_IRGRP, 'w': stat.S_IWGRP, 'x': stat.S_IXGRP},
    'o': {'r': stat.S_IROTH, 'w': stat.S_IWOTH, 'x': stat.S_IXOTH},
    'a': {
        'r': (stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH),
        'w': (stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH),
        'x': (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    }
}
|
|
|
|
|
|
|
|
|
|
|
|
def apply_mode_spec(spec, mode):
    """
    Given a symbolic file mode change specification in the style of chmod(1)
    `spec`, apply changes in the specification to the numeric file mode `mode`.
    """
    for clause in mitogen.core.to_text(spec).split(','):
        match = CHMOD_CLAUSE_PAT.match(clause)
        who, op, perms = match.groups()
        # chmod(1) semantics: an empty class list means all classes ('a').
        for ch in who or 'a':
            mask = CHMOD_MASKS[ch]
            bits = CHMOD_BITS[ch]
            # Current permission bits for this class, saved before clearing.
            cur_perm_bits = mode & mask
            # OR together the bit for each requested permission letter.
            # NOTE(review): the regex also admits a single class letter
            # ([ugo], the chmod "copy permissions" form), but CHMOD_BITS only
            # maps 'r'/'w'/'x', so such a spec would raise KeyError — confirm
            # callers never pass e.g. "g=u".
            new_perm_bits = reduce(operator.or_, (bits[p] for p in perms), 0)
            # Clear this class's triad, then rebuild it per the operator.
            mode &= ~mask
            if op == '=':
                mode |= new_perm_bits
            elif op == '+':
                mode |= new_perm_bits | cur_perm_bits
            else:
                # '-': keep only the previous bits not being removed.
                mode |= cur_perm_bits & ~new_perm_bits
    return mode
|
|
|
|
|
|
|
|
|
2019-01-23 12:44:08 +00:00
|
|
|
def set_file_mode(path, spec, fd=None):
    """
    Update the permissions of a file using the same syntax as chmod(1).
    """
    if isinstance(spec, int):
        new_mode = spec
    elif not mitogen.core.PY3 and isinstance(spec, long):
        # Python 2 may deliver the mode as a long.
        new_mode = spec
    elif spec.isdigit():
        # Octal string, e.g. "0644".
        new_mode = int(spec, 8)
    else:
        # Symbolic spec, e.g. "u+rwx": applied relative to the current mode.
        new_mode = apply_mode_spec(spec, os.stat(path).st_mode)

    if fd is not None and hasattr(os, 'fchmod'):
        os.fchmod(fd, new_mode)
    else:
        # Python<2.6 lacks os.fchmod().
        os.chmod(path, new_mode)
|
2019-01-27 03:00:46 +00:00
|
|
|
|
|
|
|
|
|
|
|
def file_exists(path):
    """
    Return :data:`True` if `path` exists. Wraps :func:`os.path.exists`, whose
    implementation module varies across Python versions, so remote callers
    have one stable entry point to invoke.
    """
    exists = os.path.exists
    return exists(path)
|