path: root/morphlib/bins.py
# Copyright (C) 2011-2014  Codethink Limited
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.


'''Functions for dealing with Baserock binaries.

Binaries are chunks, strata, and system images.

'''


import cliapp
import logging
import os
import re
import errno
import stat
import shutil
import tarfile
import functools
import hashlib

import morphlib

from morphlib.extractedtarball import ExtractedTarball
from morphlib.mountableimage import MountableImage


# Work around http://bugs.python.org/issue16477
def safe_makefile(self, tarinfo, targetpath):
    '''Create a file, closing correctly in case of exception'''

    source = self.extractfile(tarinfo)
    try:
        with open(targetpath, "wb") as target:
            shutil.copyfileobj(source, target)
    finally:
        source.close()

tarfile.TarFile.makefile = safe_makefile


class HashedOutputStream(object):
    '''Wrap a stream object and hash all data that is written.

    We use SHA1 as the hash. Currently this is only used to guard against
    corruption, but if we can ensure that our builds are bit-for-bit reproducible
    then it could provide a level of tampering-detection as good as that of
    Git.

    While using a non-secure checksum like Adler32 would be faster, the
    implementations of these in Python's zlib module don't allow calculating a
    rolling checksum, so a custom C implementation would be needed.

    In a rough benchmark, calculating a SHA1 hash during tarfile creation
    using this method slowed throughput by about 5%.

    '''
    def __init__(self, f):
        self.f = f
        self.hasher = hashlib.sha1()

    def read(self, *args, **kwargs):
        raise NotImplementedError(
            'Attempted to read from a write-only stream.')

    def write(self, data, *args, **kwargs):
        self.f.write(data, *args, **kwargs)
        self.hasher.update(data)

    # FIXME: rename (hash is a builtin)
    def hash(self):
        return self.hasher.hexdigest()
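
# Illustrative sketch only (not used by morphlib itself): wrapping an
# ordinary file object so that a SHA1 digest accumulates for every byte
# written through it. The '/tmp/example.tar' path is made up.
#
#     with open('/tmp/example.tar', 'wb') as raw:
#         stream = HashedOutputStream(raw)
#         stream.write('some data')
#         stream.write('some more data')
#         digest = stream.hash()  # hex SHA1 of everything written so far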


def create_chunk(rootdir, f, include, dump_memory_profile=None):
    '''Create a chunk from the contents of a directory.
    
    ``f`` is an open file handle, to which the tar file is written.

    This function returns a checksum of the resulting file.

    '''

    dump_memory_profile = dump_memory_profile or (lambda msg: None)

    # This timestamp is used to normalize the mtime for every file in
    # the chunk artifact. This is useful to avoid problems caused by
    # small amounts of clock skew. It needs to be recent enough, however,
    # that GNU tar does not complain about an implausibly old timestamp.
    normalized_timestamp = 683074800

    dump_memory_profile('at beginning of create_chunk')
    
    path_pairs = [(relname, os.path.join(rootdir, relname))
                  for relname in include]
    stream = HashedOutputStream(f)
    with tarfile.open(fileobj=stream, mode='w|') as tar:
        for relname, filename in path_pairs:
            # Normalize mtime for everything.
            tarinfo = tar.gettarinfo(filename, arcname=relname)
            tarinfo.ctime = normalized_timestamp
            tarinfo.mtime = normalized_timestamp
            if tarinfo.isreg():
                with open(filename, 'rb') as f:
                    tar.addfile(tarinfo, fileobj=f)
            else:
                tar.addfile(tarinfo)

    for relname, filename in reversed(path_pairs):
        if os.path.isdir(filename) and not os.path.islink(filename):
            continue
        else:
            os.remove(filename)
    dump_memory_profile('after removing in create_chunk')

    return stream.hash()
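
# Illustrative sketch of a call to create_chunk(); all paths here are made
# up. 'include' lists paths relative to rootdir, and note that the included
# files are removed from rootdir as a side effect.
#
#     with open('/tmp/foo-chunk.tar', 'wb') as f:
#         checksum = create_chunk(
#             '/tmp/chroot', f, ['usr', 'usr/bin', 'usr/bin/foo'])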


def make_tarinfo_path_relative_to(root, info):
    '''Strip rootdir from a file's path before adding to a tarfile.'''

    # tar.gettarinfo() makes all paths relative, we must follow that.
    root = root.lstrip('/')
    info.name = os.path.relpath(info.name, root)
    if info.islnk():
        info.linkname = os.path.relpath(info.linkname, root)
    return info
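
# Worked example with hypothetical paths: tar.gettarinfo() records the
# member as 'tmp/chroot/usr/bin/sh' (leading '/' already stripped), so with
# root='/tmp/chroot' the name comes out as 'usr/bin/sh':
#
#     info = tar.gettarinfo('/tmp/chroot/usr/bin/sh')
#     info = make_tarinfo_path_relative_to('/tmp/chroot', info)
#     assert info.name == 'usr/bin/sh'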


def create_chunk_2(rootdir, f, name, include):
    '''Create a chunk artifact, new way.

    Output should be identical to create_chunk(), but this function does
    not delete the files after creating the chunk. In principle it should
    not need the caller to work out which files go into the artifact
    either, but in practice it still does, because chunk splitting decides
    which files belong to which artifact.
    '''

    # This timestamp is used to normalize the mtime for every file in
    # the chunk artifact. This is useful to avoid problems caused by
    # small amounts of clock skew. It needs to be recent enough, however,
    # that GNU tar does not complain about an implausibly old timestamp.
    normalized_timestamp = 683074800

    stream = HashedOutputStream(f)
    with tarfile.open(fileobj=stream, mode='w|') as tar:
        for filepath in sorted(include):
            if filepath == rootdir:
                # I'm not sure how the ChunkBuilder.assemble_chunk_artifact()
                # code path manages to avoid adding '.' to the tarfile, but it
                # does
                continue
            # Normalize mtime for everything.
            tarinfo = tar.gettarinfo(filepath)
            tarinfo = make_tarinfo_path_relative_to(rootdir, tarinfo)
            tarinfo.ctime = normalized_timestamp
            tarinfo.mtime = normalized_timestamp
            if tarinfo.isreg():
                # Regular files need an open file object so that their
                # contents are actually written into the archive.
                with open(filepath, 'rb') as f:
                    tar.addfile(tarinfo, fileobj=f)
            else:
                tar.addfile(tarinfo)

    return stream.hash()
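
# Illustrative sketch of a call to create_chunk_2(); paths and the artifact
# name are made up. Unlike create_chunk(), 'include' holds absolute paths
# under rootdir and the source files are left in place afterwards.
#
#     with open('/tmp/foo-chunk.tar', 'wb') as f:
#         checksum = create_chunk_2(
#             '/tmp/chroot', f, 'foo-bins',
#             ['/tmp/chroot/usr', '/tmp/chroot/usr/bin/foo'])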


def create_system(rootdir, f, name):
    '''Create a system artifact from the contents of a directory.

    This function returns a checksum of the resulting file.

    '''

    stream = HashedOutputStream(f)

    path_filter = functools.partial(make_tarinfo_path_relative_to, rootdir)
    with tarfile.open(fileobj=stream, mode="w|", name=name) as tar:
        tar.add(rootdir, recursive=True, filter=path_filter)

    return stream.hash()


def unpack_binary_from_file(f, dirname):  # pragma: no cover
    '''Unpack a binary into a directory.

    The directory must exist already.

    '''

    # This is evil, but necessary. For some reason Python's system
    # call wrappers (os.mknod and such) do not (always?) set the
    # filename attribute of the OSError exception they raise. We
    # fix that by monkey patching the tf instance with wrappers for the
    # relevant methods that fill in the missing filename. Each wrapper also
    # ignores EEXIST errors, since we do not (currently!) care about
    # overwriting files.

    def follow_symlink(path):  # pragma: no cover
        try:
            return os.stat(path)
        except OSError:
            return None

    def prepare_extract(tarinfo, targetpath):  # pragma: no cover
        '''Prepare to extract a tar file member onto targetpath.

        If the target already exists, and we can live with it or
        remove it, we do so. Otherwise, raise an error.

        It's OK to extract if:

        * the target does not exist
        * the member is a directory and the
          target is a directory or a symlink to a directory
          (just extract, no need to remove)
        * the member is not a directory, and the target is not a directory
          or a symlink to a directory (remove target, then extract)

        '''

        try:
            existing = os.lstat(targetpath)
        except OSError:
            return True  # target does not exist

        if tarinfo.isdir():
            if stat.S_ISDIR(existing.st_mode):
                return True
            elif stat.S_ISLNK(existing.st_mode):
                st = follow_symlink(targetpath)
                return st and stat.S_ISDIR(st.st_mode)
        else:
            if stat.S_ISDIR(existing.st_mode):
                return False
            elif stat.S_ISLNK(existing.st_mode):
                st = follow_symlink(targetpath)
                if st and not stat.S_ISDIR(st.st_mode):
                    os.remove(targetpath)
                    return True
            else:
                os.remove(targetpath)
                return True
        return False

    def monkey_patcher(real):
        def make_something(tarinfo, targetpath):  # pragma: no cover
            prepare_extract(tarinfo, targetpath)
            try:
                ret = real(tarinfo, targetpath)
            except (IOError, OSError) as e:
                if e.errno != errno.EEXIST:
                    if e.filename is None:
                        e.filename = targetpath
                    raise
            else:
                return ret
        return make_something

    tf = tarfile.open(fileobj=f, errorlevel=2)
    tf.makedir = monkey_patcher(tf.makedir)
    tf.makefile = monkey_patcher(tf.makefile)
    tf.makeunknown = monkey_patcher(tf.makeunknown)
    tf.makefifo = monkey_patcher(tf.makefifo)
    tf.makedev = monkey_patcher(tf.makedev)
    tf.makelink = monkey_patcher(tf.makelink)

    try:
        tf.extractall(path=dirname)
    finally:
        tf.close()


def unpack_binary(filename, dirname):
    with open(filename, "rb") as f:
        unpack_binary_from_file(f, dirname)
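
# Illustrative round trip with made-up paths: create a system artifact from
# a populated directory tree, then unpack it somewhere else. The target
# directory must already exist.
#
#     with open('/tmp/system.tar', 'wb') as f:
#         checksum = create_system('/tmp/system-root', f, 'base-system')
#     os.mkdir('/tmp/unpacked')
#     unpack_binary('/tmp/system.tar', '/tmp/unpacked')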


class ArtifactNotMountableError(cliapp.AppException): # pragma: no cover

    def __init__(self, filename):
        cliapp.AppException.__init__(
                self, 'Artifact %s cannot be extracted or mounted' % filename)


def call_in_artifact_directory(app, filename, callback): # pragma: no cover
    '''Call a function in a directory the artifact is extracted/mounted in.'''

    try:
        with ExtractedTarball(app, filename) as dirname:
            callback(dirname)
    except tarfile.TarError:
        try:
            with MountableImage(app, filename) as dirname:
                callback(dirname)
        except (IOError, OSError):
            raise ArtifactNotMountableError(filename)
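
# Illustrative sketch: 'app' stands for the cliapp application object that
# morph commands normally pass in, and 'log_contents' is a made-up callback.
#
#     def log_contents(dirname):
#         logging.debug('artifact contents: %s', sorted(os.listdir(dirname)))
#
#     call_in_artifact_directory(app, '/tmp/chunk.tar', log_contents)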