1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
|
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A tool for interacting with .pak files.
For details on the pak file format, see:
https://dev.chromium.org/developers/design-documents/linuxresourcesandlocalizedstrings
"""
import argparse
import hashlib
import os
import sys
from grit.format import data_pack
def _RepackMain(args):
  """Handles the 'repack' command: merges the input .pak files into one."""
  data_pack.RePack(args.output_pak_file,
                   args.input_pak_files,
                   args.whitelist,
                   args.suppress_removed_key_output)
def _ExtractMain(args):
  """Handles the 'extract' command: writes each resource to its own file.

  Output files are named by resource ID inside args.output_dir, the same
  layout that the 'create' command reads back.
  """
  pak = data_pack.ReadDataPack(args.pak_file)
  for resource_id, payload in pak.resources.iteritems():
    path = os.path.join(args.output_dir, str(resource_id))
    # Write in binary mode: payloads are raw bytes and _CreateMain reads
    # them back with 'rb'. Text mode ('w') would mangle them on Windows.
    with open(path, 'wb') as f:
      f.write(payload)
def _CreateMain(args):
  """Handles the 'create' command: builds a .pak from an extracted directory.

  Only regular files whose names parse as integers (resource IDs) are
  included, mirroring the layout produced by the 'extract' command.
  """
  pak = {}
  for name in os.listdir(args.input_dir):
    try:
      resource_id = int(name)
    except ValueError:
      # Not a resource-ID filename (e.g. a stray editor file); skip it.
      # A bare except here would also swallow KeyboardInterrupt and the like.
      continue
    filename = os.path.join(args.input_dir, name)
    if os.path.isfile(filename):
      with open(filename, 'rb') as f:
        pak[resource_id] = f.read()
  data_pack.WriteDataPack(pak, args.output_pak_file, data_pack.UTF8)
def _PrintMain(args):
  """Handles the 'print' command: dumps pak metadata and per-entry summaries.

  For each resource prints its ID, canonical ID (after alias resolution),
  size, a truncated SHA-1 of the payload, and (when --no-decode was not
  given and the pak is UTF-encoded) a preview of the decoded text.
  """
  pak = data_pack.ReadDataPack(args.pak_file)
  output = args.output
  # Map the pak's encoding enum to a label. 0 means raw binary entries;
  # the original if/else chain clobbered 'binary' for encoding == 0 and
  # printed '?0' instead.
  if pak.encoding == 0:
    encoding = 'binary'
  elif pak.encoding == 1:
    encoding = 'utf-8'
  elif pak.encoding == 2:
    encoding = 'utf-16'
  else:
    encoding = '?' + str(pak.encoding)
  output.write('version: {}\n'.format(pak.version))
  output.write('encoding: {}\n'.format(encoding))
  output.write('num_resources: {}\n'.format(len(pak.resources)))
  output.write('num_aliases: {}\n'.format(len(pak.aliases)))
  breakdown = ', '.join('{}: {}'.format(*x) for x in pak.sizes)
  output.write('total_size: {} ({})\n'.format(pak.sizes.total, breakdown))
  try_decode = args.decode and encoding.startswith('utf')
  # Print IDs in ascending order, since that's the order in which they appear
  # in the file (order is lost by Python dict).
  for resource_id in sorted(pak.resources):
    data = pak.resources[resource_id]
    canonical_id = pak.aliases.get(resource_id, resource_id)
    desc = '<data>'
    if try_decode:
      try:
        desc = unicode(data, encoding)
        if len(desc) > 60:
          desc = desc[:60] + u'...'
        desc = desc.replace('\n', '\\n')
      except UnicodeDecodeError:
        # Not valid text despite the pak's declared encoding; keep '<data>'.
        pass
    sha1 = hashlib.sha1(data).hexdigest()[:10]
    output.write(
        u'Entry(id={}, canonical_id={}, size={}, sha1={}): {}\n'.format(
            resource_id, canonical_id, len(data), sha1, desc).encode('utf-8'))
def _ListMain(args):
  """Handles the 'list-id' command: writes resource IDs, one per line."""
  pak = data_pack.ReadDataPack(args.pak_file)
  for resource_id in sorted(pak.resources):
    args.output.write('{}\n'.format(resource_id))
def main():
  """Parses the command line and dispatches to the selected sub-command.

  With no arguments, prints the top-level help and exits with status 1.
  With only a sub-command name, prints that sub-command's help instead of
  failing on missing positionals.
  """
  parser = argparse.ArgumentParser(
      description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
  sub_parsers = parser.add_subparsers()

  sub_parser = sub_parsers.add_parser('repack',
      help='Combines several .pak files into one.')
  sub_parser.add_argument('output_pak_file', help='File to create.')
  sub_parser.add_argument('input_pak_files', nargs='+',
                          help='Input .pak files.')
  sub_parser.add_argument('--whitelist',
      help='Path to a whitelist used to filter output pak file resource IDs.')
  sub_parser.add_argument('--suppress-removed-key-output', action='store_true',
      help='Do not log which keys were removed by the whitelist.')
  sub_parser.set_defaults(func=_RepackMain)

  sub_parser = sub_parsers.add_parser('extract', help='Extracts pak file')
  sub_parser.add_argument('pak_file')
  sub_parser.add_argument('-o', '--output-dir', default='.',
                          help='Directory to extract to.')
  sub_parser.set_defaults(func=_ExtractMain)

  sub_parser = sub_parsers.add_parser('create',
      help='Creates pak file from extracted directory.')
  sub_parser.add_argument('output_pak_file', help='File to create.')
  sub_parser.add_argument('-i', '--input-dir', default='.',
                          help='Directory to create from.')
  sub_parser.set_defaults(func=_CreateMain)

  sub_parser = sub_parsers.add_parser('print',
      help='Prints all pak IDs and contents. Useful for diffing.')
  sub_parser.add_argument('pak_file')
  sub_parser.add_argument('--output', type=argparse.FileType('w'),
                          default=sys.stdout,
                          help='The resource list path to write (default stdout)')
  sub_parser.add_argument('--no-decode', dest='decode', action='store_false',
                          default=True, help='Do not print entry data.')
  sub_parser.set_defaults(func=_PrintMain)

  sub_parser = sub_parsers.add_parser('list-id',
      help='Outputs all resource IDs to a file.')
  sub_parser.add_argument('pak_file')
  sub_parser.add_argument('--output', type=argparse.FileType('w'),
                          default=sys.stdout,
                          help='The resource list path to write (default stdout)')
  sub_parser.set_defaults(func=_ListMain)

  if len(sys.argv) == 1:
    parser.print_help()
    sys.exit(1)
  # Fix: the original tested 'sys.argv[1] in actions', but no name 'actions'
  # was ever defined, so any one-argument invocation raised NameError.
  # sub_parsers.choices maps sub-command names to their parsers.
  elif len(sys.argv) == 2 and sys.argv[1] in sub_parsers.choices:
    # Show the sub-command's own help rather than erroring on missing args.
    parser.parse_args(sys.argv[1:] + ['-h'])
    sys.exit(1)
  args = parser.parse_args()
  args.func(args)
# Script entry point; all argument handling lives in main().
if __name__ == '__main__':
  main()
|